author    Allan Sandfeld Jensen <allan.jensen@qt.io>    2022-02-02 12:21:57 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>    2022-02-12 08:13:00 +0000
commit    606d85f2a5386472314d39923da28c70c60dc8e7 (patch)
tree      a8f4d7bf997f349f45605e6058259fba0630e4d7 /chromium/v8
parent    5786336dda477d04fb98483dca1a5426eebde2d7 (diff)
download  qtwebengine-chromium-606d85f2a5386472314d39923da28c70c60dc8e7.tar.gz
BASELINE: Update Chromium to 96.0.4664.181
Change-Id: I762cd1da89d73aa6313b4a753fe126c34833f046
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8'):
-rw-r--r--chromium/v8/.flake81
-rw-r--r--chromium/v8/.vpython15
-rw-r--r--chromium/v8/AUTHORS30
-rw-r--r--chromium/v8/BUILD.bazel73
-rw-r--r--chromium/v8/BUILD.gn389
-rw-r--r--chromium/v8/COMMON_OWNERS3
-rw-r--r--chromium/v8/DEPS63
-rw-r--r--chromium/v8/ENG_REVIEW_OWNERS2
-rw-r--r--chromium/v8/LOONG_OWNERS3
-rw-r--r--chromium/v8/MIPS_OWNERS1
-rw-r--r--chromium/v8/OWNERS1
-rw-r--r--chromium/v8/RISCV_OWNERS1
-rw-r--r--chromium/v8/WATCHLISTS12
-rw-r--r--chromium/v8/gni/snapshot_toolchain.gni2
-rw-r--r--chromium/v8/gni/v8.gni10
-rw-r--r--chromium/v8/include/cppgc/README.md17
-rw-r--r--chromium/v8/include/cppgc/allocation.h16
-rw-r--r--chromium/v8/include/cppgc/cross-thread-persistent.h104
-rw-r--r--chromium/v8/include/cppgc/internal/caged-heap-local-data.h4
-rw-r--r--chromium/v8/include/cppgc/internal/finalizer-trait.h2
-rw-r--r--chromium/v8/include/cppgc/internal/gc-info.h100
-rw-r--r--chromium/v8/include/cppgc/internal/name-trait.h11
-rw-r--r--chromium/v8/include/cppgc/internal/persistent-node.h56
-rw-r--r--chromium/v8/include/cppgc/internal/pointer-policies.h33
-rw-r--r--chromium/v8/include/cppgc/internal/write-barrier.h10
-rw-r--r--chromium/v8/include/cppgc/persistent.h2
-rw-r--r--chromium/v8/include/cppgc/prefinalizer.h2
-rw-r--r--chromium/v8/include/js_protocol.pdl44
-rw-r--r--chromium/v8/include/v8-array-buffer.h433
-rw-r--r--chromium/v8/include/v8-callbacks.h377
-rw-r--r--chromium/v8/include/v8-container.h129
-rw-r--r--chromium/v8/include/v8-context.h418
-rw-r--r--chromium/v8/include/v8-cppgc.h7
-rw-r--r--chromium/v8/include/v8-data.h65
-rw-r--r--chromium/v8/include/v8-date.h43
-rw-r--r--chromium/v8/include/v8-debug.h151
-rw-r--r--chromium/v8/include/v8-embedder-heap.h238
-rw-r--r--chromium/v8/include/v8-exception.h224
-rw-r--r--chromium/v8/include/v8-extension.h62
-rw-r--r--chromium/v8/include/v8-external.h37
-rw-r--r--chromium/v8/include/v8-fast-api-calls.h74
-rw-r--r--chromium/v8/include/v8-forward.h81
-rw-r--r--chromium/v8/include/v8-function-callback.h475
-rw-r--r--chromium/v8/include/v8-function.h122
-rw-r--r--chromium/v8/include/v8-initialization.h282
-rw-r--r--chromium/v8/include/v8-inspector.h32
-rw-r--r--chromium/v8/include/v8-internal.h99
-rw-r--r--chromium/v8/include/v8-isolate.h1662
-rw-r--r--chromium/v8/include/v8-json.h47
-rw-r--r--chromium/v8/include/v8-local-handle.h459
-rw-r--r--chromium/v8/include/v8-locker.h148
-rw-r--r--chromium/v8/include/v8-maybe.h137
-rw-r--r--chromium/v8/include/v8-memory-span.h43
-rw-r--r--chromium/v8/include/v8-message.h237
-rw-r--r--chromium/v8/include/v8-metrics.h13
-rw-r--r--chromium/v8/include/v8-microtask-queue.h152
-rw-r--r--chromium/v8/include/v8-microtask.h28
-rw-r--r--chromium/v8/include/v8-object.h770
-rw-r--r--chromium/v8/include/v8-persistent-handle.h590
-rw-r--r--chromium/v8/include/v8-platform.h40
-rw-r--r--chromium/v8/include/v8-primitive-object.h118
-rw-r--r--chromium/v8/include/v8-primitive.h858
-rw-r--r--chromium/v8/include/v8-profiler.h5
-rw-r--r--chromium/v8/include/v8-promise.h174
-rw-r--r--chromium/v8/include/v8-proxy.h50
-rw-r--r--chromium/v8/include/v8-regexp.h105
-rw-r--r--chromium/v8/include/v8-script.h771
-rw-r--r--chromium/v8/include/v8-snapshot.h198
-rw-r--r--chromium/v8/include/v8-statistics.h215
-rw-r--r--chromium/v8/include/v8-template.h1052
-rw-r--r--chromium/v8/include/v8-traced-handle.h605
-rw-r--r--chromium/v8/include/v8-typed-array.h282
-rw-r--r--chromium/v8/include/v8-unwinder-state.h7
-rw-r--r--chromium/v8/include/v8-unwinder.h129
-rw-r--r--chromium/v8/include/v8-util.h8
-rw-r--r--chromium/v8/include/v8-value-serializer.h249
-rw-r--r--chromium/v8/include/v8-value.h526
-rw-r--r--chromium/v8/include/v8-version.h6
-rw-r--r--chromium/v8/include/v8-wasm.h251
-rw-r--r--chromium/v8/include/v8-weak-callback-info.h73
-rw-r--r--chromium/v8/include/v8.h12336
-rw-r--r--chromium/v8/infra/mb/mb_config.pyl36
-rw-r--r--chromium/v8/infra/testing/builders.pyl206
-rw-r--r--chromium/v8/samples/cppgc/hello-world.cc (renamed from chromium/v8/samples/cppgc/cppgc-sample.cc)0
-rw-r--r--chromium/v8/samples/hello-world.cc7
-rw-r--r--chromium/v8/samples/process.cc21
-rw-r--r--chromium/v8/samples/shell.cc17
-rw-r--r--chromium/v8/src/DEPS5
-rw-r--r--chromium/v8/src/api/api-arguments.h1
-rw-r--r--chromium/v8/src/api/api-inl.h17
-rw-r--r--chromium/v8/src/api/api-natives.h2
-rw-r--r--chromium/v8/src/api/api.cc384
-rw-r--r--chromium/v8/src/api/api.h17
-rw-r--r--chromium/v8/src/asmjs/asm-parser.cc34
-rw-r--r--chromium/v8/src/ast/ast.cc7
-rw-r--r--chromium/v8/src/ast/prettyprinter.cc38
-rw-r--r--chromium/v8/src/ast/prettyprinter.h3
-rw-r--r--chromium/v8/src/ast/scopes.cc13
-rw-r--r--chromium/v8/src/base/atomicops.h25
-rw-r--r--chromium/v8/src/base/bounded-page-allocator.cc80
-rw-r--r--chromium/v8/src/base/bounded-page-allocator.h19
-rw-r--r--chromium/v8/src/base/build_config.h12
-rw-r--r--chromium/v8/src/base/compiler-specific.h12
-rw-r--r--chromium/v8/src/base/flags.h66
-rw-r--r--chromium/v8/src/base/macros.h12
-rw-r--r--chromium/v8/src/base/optional.h2
-rw-r--r--chromium/v8/src/base/page-allocator.cc4
-rw-r--r--chromium/v8/src/base/page-allocator.h2
-rw-r--r--chromium/v8/src/base/platform/platform-fuchsia.cc8
-rw-r--r--chromium/v8/src/base/platform/platform-posix.cc56
-rw-r--r--chromium/v8/src/base/platform/platform-win32.cc15
-rw-r--r--chromium/v8/src/base/platform/platform.h4
-rw-r--r--chromium/v8/src/base/region-allocator.cc29
-rw-r--r--chromium/v8/src/base/region-allocator.h5
-rw-r--r--chromium/v8/src/base/sanitizer/asan.h7
-rw-r--r--chromium/v8/src/base/sanitizer/tsan.h20
-rw-r--r--chromium/v8/src/base/vlq.h2
-rw-r--r--chromium/v8/src/base/win32-headers.h6
-rw-r--r--chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h13
-rw-r--r--chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h12
-rw-r--r--chromium/v8/src/baseline/baseline-assembler-inl.h20
-rw-r--r--chromium/v8/src/baseline/baseline-assembler.h15
-rw-r--r--chromium/v8/src/baseline/baseline-batch-compiler.cc8
-rw-r--r--chromium/v8/src/baseline/baseline-compiler.cc90
-rw-r--r--chromium/v8/src/baseline/baseline-compiler.h1
-rw-r--r--chromium/v8/src/baseline/baseline.cc7
-rw-r--r--chromium/v8/src/baseline/bytecode-offset-iterator.cc2
-rw-r--r--chromium/v8/src/baseline/bytecode-offset-iterator.h2
-rw-r--r--chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h15
-rw-r--r--chromium/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h503
-rw-r--r--chromium/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h77
-rw-r--r--chromium/v8/src/baseline/mips/baseline-assembler-mips-inl.h6
-rw-r--r--chromium/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h6
-rw-r--r--chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h272
-rw-r--r--chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h64
-rw-r--r--chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h15
-rw-r--r--chromium/v8/src/bigint/bigint-internal.h3
-rw-r--r--chromium/v8/src/bigint/bigint.h125
-rw-r--r--chromium/v8/src/bigint/bitwise.cc262
-rw-r--r--chromium/v8/src/bigint/fromstring.cc263
-rw-r--r--chromium/v8/src/bigint/mul-fft.cc2
-rw-r--r--chromium/v8/src/bigint/vector-arithmetic.cc17
-rw-r--r--chromium/v8/src/builtins/accessors.cc1
-rw-r--r--chromium/v8/src/builtins/accessors.h2
-rw-r--r--chromium/v8/src/builtins/arm/builtins-arm.cc403
-rw-r--r--chromium/v8/src/builtins/arm64/builtins-arm64.cc377
-rw-r--r--chromium/v8/src/builtins/array-concat.tq2
-rw-r--r--chromium/v8/src/builtins/array-filter.tq2
-rw-r--r--chromium/v8/src/builtins/array-from.tq2
-rw-r--r--chromium/v8/src/builtins/array-join.tq17
-rw-r--r--chromium/v8/src/builtins/array-lastindexof.tq6
-rw-r--r--chromium/v8/src/builtins/array-map.tq7
-rw-r--r--chromium/v8/src/builtins/array-reverse.tq15
-rw-r--r--chromium/v8/src/builtins/array-shift.tq2
-rw-r--r--chromium/v8/src/builtins/array-slice.tq49
-rw-r--r--chromium/v8/src/builtins/array-unshift.tq2
-rw-r--r--chromium/v8/src/builtins/array.tq13
-rw-r--r--chromium/v8/src/builtins/arraybuffer.tq4
-rw-r--r--chromium/v8/src/builtins/base.tq75
-rw-r--r--chromium/v8/src/builtins/builtins-array-gen.cc120
-rw-r--r--chromium/v8/src/builtins/builtins-array-gen.h2
-rw-r--r--chromium/v8/src/builtins/builtins-array.cc12
-rw-r--r--chromium/v8/src/builtins/builtins-arraybuffer.cc53
-rw-r--r--chromium/v8/src/builtins/builtins-async-function-gen.cc13
-rw-r--r--chromium/v8/src/builtins/builtins-async-gen.cc10
-rw-r--r--chromium/v8/src/builtins/builtins-async-generator-gen.cc14
-rw-r--r--chromium/v8/src/builtins/builtins-async-iterator-gen.cc16
-rw-r--r--chromium/v8/src/builtins/builtins-bigint.cc8
-rw-r--r--chromium/v8/src/builtins/builtins-bigint.tq6
-rw-r--r--chromium/v8/src/builtins/builtins-call-gen.cc17
-rw-r--r--chromium/v8/src/builtins/builtins-collections-gen.cc74
-rw-r--r--chromium/v8/src/builtins/builtins-constructor-gen.cc17
-rw-r--r--chromium/v8/src/builtins/builtins-dataview.cc63
-rw-r--r--chromium/v8/src/builtins/builtins-date.cc169
-rw-r--r--chromium/v8/src/builtins/builtins-definitions.h298
-rw-r--r--chromium/v8/src/builtins/builtins-descriptors.h27
-rw-r--r--chromium/v8/src/builtins/builtins-generator-gen.cc34
-rw-r--r--chromium/v8/src/builtins/builtins-internal-gen.cc124
-rw-r--r--chromium/v8/src/builtins/builtins-intl-gen.cc4
-rw-r--r--chromium/v8/src/builtins/builtins-intl.cc128
-rw-r--r--chromium/v8/src/builtins/builtins-lazy-gen.cc7
-rw-r--r--chromium/v8/src/builtins/builtins-microtask-queue-gen.cc8
-rw-r--r--chromium/v8/src/builtins/builtins-number.cc6
-rw-r--r--chromium/v8/src/builtins/builtins-object-gen.cc52
-rw-r--r--chromium/v8/src/builtins/builtins-proxy-gen.cc30
-rw-r--r--chromium/v8/src/builtins/builtins-regexp-gen.cc206
-rw-r--r--chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc49
-rw-r--r--chromium/v8/src/builtins/builtins-string-gen.cc47
-rw-r--r--chromium/v8/src/builtins/builtins-string.cc18
-rw-r--r--chromium/v8/src/builtins/builtins-string.tq8
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array-gen.cc57
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array-gen.h5
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array.cc84
-rw-r--r--chromium/v8/src/builtins/builtins.cc52
-rw-r--r--chromium/v8/src/builtins/builtins.h15
-rw-r--r--chromium/v8/src/builtins/cast.tq6
-rw-r--r--chromium/v8/src/builtins/console.tq3
-rw-r--r--chromium/v8/src/builtins/convert.tq9
-rw-r--r--chromium/v8/src/builtins/data-view.tq24
-rw-r--r--chromium/v8/src/builtins/finalization-registry.tq14
-rw-r--r--chromium/v8/src/builtins/frame-arguments.tq10
-rw-r--r--chromium/v8/src/builtins/frames.tq8
-rw-r--r--chromium/v8/src/builtins/function.tq5
-rw-r--r--chromium/v8/src/builtins/growable-fixed-array-gen.cc8
-rw-r--r--chromium/v8/src/builtins/growable-fixed-array.tq12
-rw-r--r--chromium/v8/src/builtins/ia32/builtins-ia32.cc448
-rw-r--r--chromium/v8/src/builtins/ic-callable.tq22
-rw-r--r--chromium/v8/src/builtins/ic-dynamic-check-maps.tq6
-rw-r--r--chromium/v8/src/builtins/ic.tq3
-rw-r--r--chromium/v8/src/builtins/internal-coverage.tq4
-rw-r--r--chromium/v8/src/builtins/internal.tq10
-rw-r--r--chromium/v8/src/builtins/iterator.tq8
-rw-r--r--chromium/v8/src/builtins/loong64/builtins-loong64.cc3755
-rw-r--r--chromium/v8/src/builtins/math.tq2
-rw-r--r--chromium/v8/src/builtins/mips/builtins-mips.cc42
-rw-r--r--chromium/v8/src/builtins/mips64/builtins-mips64.cc43
-rw-r--r--chromium/v8/src/builtins/number.tq12
-rw-r--r--chromium/v8/src/builtins/object-fromentries.tq2
-rw-r--r--chromium/v8/src/builtins/ppc/builtins-ppc.cc97
-rw-r--r--chromium/v8/src/builtins/promise-abstract-operations.tq4
-rw-r--r--chromium/v8/src/builtins/promise-all-element-closure.tq6
-rw-r--r--chromium/v8/src/builtins/promise-all.tq8
-rw-r--r--chromium/v8/src/builtins/promise-any.tq16
-rw-r--r--chromium/v8/src/builtins/promise-finally.tq6
-rw-r--r--chromium/v8/src/builtins/promise-misc.tq25
-rw-r--r--chromium/v8/src/builtins/promise-race.tq2
-rw-r--r--chromium/v8/src/builtins/promise-resolve.tq4
-rw-r--r--chromium/v8/src/builtins/proxy-delete-property.tq8
-rw-r--r--chromium/v8/src/builtins/proxy-get-property.tq6
-rw-r--r--chromium/v8/src/builtins/proxy-get-prototype-of.tq4
-rw-r--r--chromium/v8/src/builtins/proxy-has-property.tq8
-rw-r--r--chromium/v8/src/builtins/proxy-is-extensible.tq2
-rw-r--r--chromium/v8/src/builtins/proxy-prevent-extensions.tq4
-rw-r--r--chromium/v8/src/builtins/proxy-revoke.tq2
-rw-r--r--chromium/v8/src/builtins/proxy-set-property.tq8
-rw-r--r--chromium/v8/src/builtins/proxy-set-prototype-of.tq6
-rw-r--r--chromium/v8/src/builtins/proxy.tq6
-rw-r--r--chromium/v8/src/builtins/regexp-match-all.tq8
-rw-r--r--chromium/v8/src/builtins/regexp-match.tq10
-rw-r--r--chromium/v8/src/builtins/regexp-replace.tq9
-rw-r--r--chromium/v8/src/builtins/regexp-search.tq2
-rw-r--r--chromium/v8/src/builtins/regexp.tq6
-rw-r--r--chromium/v8/src/builtins/riscv64/builtins-riscv64.cc140
-rw-r--r--chromium/v8/src/builtins/s390/builtins-s390.cc97
-rw-r--r--chromium/v8/src/builtins/setup-builtins-internal.cc17
-rw-r--r--chromium/v8/src/builtins/string-pad.tq10
-rw-r--r--chromium/v8/src/builtins/string-repeat.tq6
-rw-r--r--chromium/v8/src/builtins/string-substr.tq2
-rw-r--r--chromium/v8/src/builtins/torque-csa-header-includes.h1
-rw-r--r--chromium/v8/src/builtins/torque-internal.tq26
-rw-r--r--chromium/v8/src/builtins/typed-array-at.tq9
-rw-r--r--chromium/v8/src/builtins/typed-array-createtypedarray.tq21
-rw-r--r--chromium/v8/src/builtins/typed-array-every.tq48
-rw-r--r--chromium/v8/src/builtins/typed-array-filter.tq12
-rw-r--r--chromium/v8/src/builtins/typed-array-find.tq35
-rw-r--r--chromium/v8/src/builtins/typed-array-findindex.tq28
-rw-r--r--chromium/v8/src/builtins/typed-array-findlast.tq56
-rw-r--r--chromium/v8/src/builtins/typed-array-findlastindex.tq57
-rw-r--r--chromium/v8/src/builtins/typed-array-foreach.tq23
-rw-r--r--chromium/v8/src/builtins/typed-array-reduce.tq12
-rw-r--r--chromium/v8/src/builtins/typed-array-reduceright.tq12
-rw-r--r--chromium/v8/src/builtins/typed-array-set.tq16
-rw-r--r--chromium/v8/src/builtins/typed-array-slice.tq40
-rw-r--r--chromium/v8/src/builtins/typed-array-some.tq52
-rw-r--r--chromium/v8/src/builtins/typed-array-sort.tq6
-rw-r--r--chromium/v8/src/builtins/typed-array.tq56
-rw-r--r--chromium/v8/src/builtins/wasm.tq19
-rw-r--r--chromium/v8/src/builtins/weak-ref.tq3
-rw-r--r--chromium/v8/src/builtins/x64/builtins-x64.cc422
-rw-r--r--chromium/v8/src/codegen/OWNERS3
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm-inl.h2
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.cc5
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.h8
-rw-r--r--chromium/v8/src/codegen/arm/macro-assembler-arm.cc122
-rw-r--r--chromium/v8/src/codegen/arm/macro-assembler-arm.h9
-rw-r--r--chromium/v8/src/codegen/arm/register-arm.h1
-rw-r--r--chromium/v8/src/codegen/arm64/assembler-arm64-inl.h4
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h13
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc200
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.h81
-rw-r--r--chromium/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--chromium/v8/src/codegen/assembler-arch.h2
-rw-r--r--chromium/v8/src/codegen/assembler-inl.h2
-rw-r--r--chromium/v8/src/codegen/assembler.cc6
-rw-r--r--chromium/v8/src/codegen/assembler.h10
-rw-r--r--chromium/v8/src/codegen/atomic-memory-order.h35
-rw-r--r--chromium/v8/src/codegen/code-factory.cc57
-rw-r--r--chromium/v8/src/codegen/code-factory.h3
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.cc635
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.h223
-rw-r--r--chromium/v8/src/codegen/compiler.cc111
-rw-r--r--chromium/v8/src/codegen/compiler.h35
-rw-r--r--chromium/v8/src/codegen/constant-pool.cc3
-rw-r--r--chromium/v8/src/codegen/constants-arch.h2
-rw-r--r--chromium/v8/src/codegen/cpu-features.h3
-rw-r--r--chromium/v8/src/codegen/external-reference.cc106
-rw-r--r--chromium/v8/src/codegen/external-reference.h39
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32-inl.h2
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.cc175
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.h188
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc539
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.h118
-rw-r--r--chromium/v8/src/codegen/ia32/register-ia32.h3
-rw-r--r--chromium/v8/src/codegen/ia32/sse-instr.h25
-rw-r--r--chromium/v8/src/codegen/interface-descriptors-inl.h11
-rw-r--r--chromium/v8/src/codegen/interface-descriptors.h16
-rw-r--r--chromium/v8/src/codegen/loong64/assembler-loong64-inl.h249
-rw-r--r--chromium/v8/src/codegen/loong64/assembler-loong64.cc2405
-rw-r--r--chromium/v8/src/codegen/loong64/assembler-loong64.h1129
-rw-r--r--chromium/v8/src/codegen/loong64/constants-loong64.cc100
-rw-r--r--chromium/v8/src/codegen/loong64/constants-loong64.h1291
-rw-r--r--chromium/v8/src/codegen/loong64/cpu-loong64.cc38
-rw-r--r--chromium/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h278
-rw-r--r--chromium/v8/src/codegen/loong64/macro-assembler-loong64.cc4108
-rw-r--r--chromium/v8/src/codegen/loong64/macro-assembler-loong64.h1071
-rw-r--r--chromium/v8/src/codegen/loong64/register-loong64.h288
-rw-r--r--chromium/v8/src/codegen/macro-assembler.h15
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips-inl.h2
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips.cc1
-rw-r--r--chromium/v8/src/codegen/mips/macro-assembler-mips.cc33
-rw-r--r--chromium/v8/src/codegen/mips/macro-assembler-mips.h8
-rw-r--r--chromium/v8/src/codegen/mips/register-mips.h1
-rw-r--r--chromium/v8/src/codegen/mips64/assembler-mips64-inl.h2
-rw-r--r--chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc37
-rw-r--r--chromium/v8/src/codegen/mips64/macro-assembler-mips64.h23
-rw-r--r--chromium/v8/src/codegen/mips64/register-mips64.h1
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.cc36
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.h44
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc-inl.h4
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.cc9
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.h48
-rw-r--r--chromium/v8/src/codegen/ppc/constants-ppc.h37
-rw-r--r--chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc311
-rw-r--r--chromium/v8/src/codegen/ppc/macro-assembler-ppc.h223
-rw-r--r--chromium/v8/src/codegen/ppc/register-ppc.h1
-rw-r--r--chromium/v8/src/codegen/register-arch.h2
-rw-r--r--chromium/v8/src/codegen/register-configuration.cc42
-rw-r--r--chromium/v8/src/codegen/reloc-info.cc2
-rw-r--r--chromium/v8/src/codegen/reloc-info.h5
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h4
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64.cc748
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64.h367
-rw-r--r--chromium/v8/src/codegen/riscv64/constants-riscv64.cc41
-rw-r--r--chromium/v8/src/codegen/riscv64/constants-riscv64.h626
-rw-r--r--chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc279
-rw-r--r--chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h76
-rw-r--r--chromium/v8/src/codegen/riscv64/register-riscv64.h87
-rw-r--r--chromium/v8/src/codegen/s390/assembler-s390-inl.h4
-rw-r--r--chromium/v8/src/codegen/s390/assembler-s390.cc1
-rw-r--r--chromium/v8/src/codegen/s390/constants-s390.h20
-rw-r--r--chromium/v8/src/codegen/s390/macro-assembler-s390.cc247
-rw-r--r--chromium/v8/src/codegen/s390/macro-assembler-s390.h202
-rw-r--r--chromium/v8/src/codegen/s390/register-s390.h1
-rw-r--r--chromium/v8/src/codegen/script-details.h1
-rw-r--r--chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc548
-rw-r--r--chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h590
-rw-r--r--chromium/v8/src/codegen/source-position.h2
-rw-r--r--chromium/v8/src/codegen/turbo-assembler.cc2
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64-inl.h10
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.cc125
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.h183
-rw-r--r--chromium/v8/src/codegen/x64/cpu-x64.cc2
-rw-r--r--chromium/v8/src/codegen/x64/fma-instr.h8
-rw-r--r--chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h4
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.cc797
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.h168
-rw-r--r--chromium/v8/src/codegen/x64/register-x64.h54
-rw-r--r--chromium/v8/src/codegen/x64/sse-instr.h1
-rw-r--r--chromium/v8/src/common/globals.h43
-rw-r--r--chromium/v8/src/common/message-template.h5
-rw-r--r--chromium/v8/src/compiler-dispatcher/OWNERS1
-rw-r--r--chromium/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc2
-rw-r--r--chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc19
-rw-r--r--chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h4
-rw-r--r--chromium/v8/src/compiler/OWNERS1
-rw-r--r--chromium/v8/src/compiler/access-builder.cc122
-rw-r--r--chromium/v8/src/compiler/access-builder.h10
-rw-r--r--chromium/v8/src/compiler/access-info.cc23
-rw-r--r--chromium/v8/src/compiler/backend/arm/code-generator-arm.cc208
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h707
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc211
-rw-r--r--chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc678
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h755
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc198
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc836
-rw-r--r--chromium/v8/src/compiler/backend/code-generator.cc104
-rw-r--r--chromium/v8/src/compiler/backend/code-generator.h37
-rw-r--r--chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc1190
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h751
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc256
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc726
-rw-r--r--chromium/v8/src/compiler/backend/instruction-codes.h181
-rw-r--r--chromium/v8/src/compiler/backend/instruction-scheduler.cc101
-rw-r--r--chromium/v8/src/compiler/backend/instruction-scheduler.h8
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector.cc173
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector.h40
-rw-r--r--chromium/v8/src/compiler/backend/instruction.cc42
-rw-r--r--chromium/v8/src/compiler/backend/instruction.h10
-rw-r--r--chromium/v8/src/compiler/backend/jump-threading.cc156
-rw-r--r--chromium/v8/src/compiler/backend/loong64/code-generator-loong64.cc2636
-rw-r--r--chromium/v8/src/compiler/backend/loong64/instruction-codes-loong64.h402
-rw-r--r--chromium/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc26
-rw-r--r--chromium/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc3108
-rw-r--r--chromium/v8/src/compiler/backend/mips/code-generator-mips.cc197
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h731
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc46
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc71
-rw-r--r--chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc410
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h805
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc68
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc391
-rw-r--r--chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc326
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h816
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc11
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc107
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc1208
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h807
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc73
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc399
-rw-r--r--chromium/v8/src/compiler/backend/s390/code-generator-s390.cc459
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h781
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc44
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc231
-rw-r--r--chromium/v8/src/compiler/backend/x64/code-generator-x64.cc1152
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h795
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc42
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc415
-rw-r--r--chromium/v8/src/compiler/branch-elimination.cc143
-rw-r--r--chromium/v8/src/compiler/branch-elimination.h6
-rw-r--r--chromium/v8/src/compiler/bytecode-graph-builder.cc17
-rw-r--r--chromium/v8/src/compiler/c-linkage.cc14
-rw-r--r--chromium/v8/src/compiler/code-assembler.cc92
-rw-r--r--chromium/v8/src/compiler/code-assembler.h65
-rw-r--r--chromium/v8/src/compiler/common-operator.cc212
-rw-r--r--chromium/v8/src/compiler/common-operator.h62
-rw-r--r--chromium/v8/src/compiler/compilation-dependencies.cc183
-rw-r--r--chromium/v8/src/compiler/compilation-dependencies.h5
-rw-r--r--chromium/v8/src/compiler/compilation-dependency.h37
-rw-r--r--chromium/v8/src/compiler/csa-load-elimination.cc2
-rw-r--r--chromium/v8/src/compiler/decompression-optimizer.cc7
-rw-r--r--chromium/v8/src/compiler/effect-control-linearizer.cc81
-rw-r--r--chromium/v8/src/compiler/effect-control-linearizer.h4
-rw-r--r--chromium/v8/src/compiler/escape-analysis.cc15
-rw-r--r--chromium/v8/src/compiler/escape-analysis.h5
-rw-r--r--chromium/v8/src/compiler/frame-states.cc7
-rw-r--r--chromium/v8/src/compiler/globals.h3
-rw-r--r--chromium/v8/src/compiler/graph-assembler.cc41
-rw-r--r--chromium/v8/src/compiler/graph-assembler.h42
-rw-r--r--chromium/v8/src/compiler/heap-refs.cc906
-rw-r--r--chromium/v8/src/compiler/heap-refs.h127
-rw-r--r--chromium/v8/src/compiler/int64-lowering.cc18
-rw-r--r--chromium/v8/src/compiler/js-call-reducer.cc71
-rw-r--r--chromium/v8/src/compiler/js-context-specialization.cc22
-rw-r--r--chromium/v8/src/compiler/js-create-lowering.cc61
-rw-r--r--chromium/v8/src/compiler/js-generic-lowering.cc16
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.cc35
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.h28
-rw-r--r--chromium/v8/src/compiler/js-inlining-heuristic.cc77
-rw-r--r--chromium/v8/src/compiler/js-inlining.cc22
-rw-r--r--chromium/v8/src/compiler/js-native-context-specialization.cc72
-rw-r--r--chromium/v8/src/compiler/js-type-hint-lowering.cc19
-rw-r--r--chromium/v8/src/compiler/js-typed-lowering.cc33
-rw-r--r--chromium/v8/src/compiler/linkage.cc19
-rw-r--r--chromium/v8/src/compiler/linkage.h43
-rw-r--r--chromium/v8/src/compiler/loop-analysis.cc19
-rw-r--r--chromium/v8/src/compiler/loop-unrolling.cc10
-rw-r--r--chromium/v8/src/compiler/machine-graph-verifier.cc40
-rw-r--r--chromium/v8/src/compiler/machine-operator-reducer.cc30
-rw-r--r--chromium/v8/src/compiler/machine-operator.cc372
-rw-r--r--chromium/v8/src/compiler/machine-operator.h92
-rw-r--r--chromium/v8/src/compiler/memory-lowering.cc33
-rw-r--r--chromium/v8/src/compiler/memory-lowering.h3
-rw-r--r--chromium/v8/src/compiler/memory-optimizer.cc13
-rw-r--r--chromium/v8/src/compiler/memory-optimizer.h1
-rw-r--r--chromium/v8/src/compiler/node-matchers.h10
-rw-r--r--chromium/v8/src/compiler/opcodes.h7
-rw-r--r--chromium/v8/src/compiler/pipeline-statistics.cc27
-rw-r--r--chromium/v8/src/compiler/pipeline-statistics.h9
-rw-r--r--chromium/v8/src/compiler/pipeline.cc129
-rw-r--r--chromium/v8/src/compiler/pipeline.h11
-rw-r--r--chromium/v8/src/compiler/property-access-builder.cc8
-rw-r--r--chromium/v8/src/compiler/raw-machine-assembler.cc41
-rw-r--r--chromium/v8/src/compiler/raw-machine-assembler.h76
-rw-r--r--chromium/v8/src/compiler/scheduler.cc23
-rw-r--r--chromium/v8/src/compiler/simplified-lowering.cc28
-rw-r--r--chromium/v8/src/compiler/simplified-lowering.h3
-rw-r--r--chromium/v8/src/compiler/simplified-operator-reducer.cc2
-rw-r--r--chromium/v8/src/compiler/simplified-operator.cc267
-rw-r--r--chromium/v8/src/compiler/simplified-operator.h19
-rw-r--r--chromium/v8/src/compiler/typed-optimization.cc6
-rw-r--r--chromium/v8/src/compiler/typer.cc45
-rw-r--r--chromium/v8/src/compiler/types.cc1
-rw-r--r--chromium/v8/src/compiler/types.h4
-rw-r--r--chromium/v8/src/compiler/verifier.cc10
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.cc387
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.h34
-rw-r--r--chromium/v8/src/compiler/wasm-inlining.cc311
-rw-r--r--chromium/v8/src/compiler/wasm-inlining.h108
-rw-r--r--chromium/v8/src/d8/async-hooks-wrapper.cc198
-rw-r--r--chromium/v8/src/d8/async-hooks-wrapper.h9
-rw-r--r--chromium/v8/src/d8/d8-platforms.cc11
-rw-r--r--chromium/v8/src/d8/d8-posix.cc12
-rw-r--r--chromium/v8/src/d8/d8-test.cc24
-rw-r--r--chromium/v8/src/d8/d8.cc119
-rw-r--r--chromium/v8/src/d8/d8.h11
-rw-r--r--chromium/v8/src/date/date.cc78
-rw-r--r--chromium/v8/src/date/date.h11
-rw-r--r--chromium/v8/src/debug/debug-evaluate.cc113
-rw-r--r--chromium/v8/src/debug/debug-evaluate.h1
-rw-r--r--chromium/v8/src/debug/debug-interface.cc109
-rw-r--r--chromium/v8/src/debug/debug-interface.h35
-rw-r--r--chromium/v8/src/debug/debug-property-iterator.cc75
-rw-r--r--chromium/v8/src/debug/debug-property-iterator.h27
-rw-r--r--chromium/v8/src/debug/debug.cc60
-rw-r--r--chromium/v8/src/debug/interface-types.h5
-rw-r--r--chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc13
-rw-r--r--chromium/v8/src/deoptimizer/deoptimized-frame-info.cc10
-rw-r--r--chromium/v8/src/deoptimizer/deoptimizer.cc47
-rw-r--r--chromium/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc42
-rw-r--r--chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc17
-rw-r--r--chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc6
-rw-r--r--chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc13
-rw-r--r--chromium/v8/src/deoptimizer/translated-state.cc32
-rw-r--r--chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc19
-rw-r--r--chromium/v8/src/diagnostics/arm/disasm-arm.cc17
-rw-r--r--chromium/v8/src/diagnostics/arm/eh-frame-arm.cc2
-rw-r--r--chromium/v8/src/diagnostics/arm/unwinder-arm.cc2
-rw-r--r--chromium/v8/src/diagnostics/arm64/disasm-arm64.cc6
-rw-r--r--chromium/v8/src/diagnostics/arm64/eh-frame-arm64.cc2
-rw-r--r--chromium/v8/src/diagnostics/compilation-statistics.cc23
-rw-r--r--chromium/v8/src/diagnostics/compilation-statistics.h2
-rw-r--r--chromium/v8/src/diagnostics/disassembler.cc23
-rw-r--r--chromium/v8/src/diagnostics/eh-frame.cc2
-rw-r--r--chromium/v8/src/diagnostics/gdb-jit.cc206
-rw-r--r--chromium/v8/src/diagnostics/gdb-jit.h12
-rw-r--r--chromium/v8/src/diagnostics/ia32/disasm-ia32.cc159
-rw-r--r--chromium/v8/src/diagnostics/loong64/disasm-loong64.cc1711
-rw-r--r--chromium/v8/src/diagnostics/loong64/unwinder-loong64.cc14
-rw-r--r--chromium/v8/src/diagnostics/mips/disasm-mips.cc8
-rw-r--r--chromium/v8/src/diagnostics/mips64/disasm-mips64.cc9
-rw-r--r--chromium/v8/src/diagnostics/objects-debug.cc68
-rw-r--r--chromium/v8/src/diagnostics/objects-printer.cc210
-rw-r--r--chromium/v8/src/diagnostics/perf-jit.h3
-rw-r--r--chromium/v8/src/diagnostics/ppc/disasm-ppc.cc31
-rw-r--r--chromium/v8/src/diagnostics/ppc/eh-frame-ppc.cc2
-rw-r--r--chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc1210
-rw-r--r--chromium/v8/src/diagnostics/s390/eh-frame-s390.cc2
-rw-r--r--chromium/v8/src/diagnostics/system-jit-win.cc6
-rw-r--r--chromium/v8/src/diagnostics/unwinder.cc2
-rw-r--r--chromium/v8/src/diagnostics/unwinding-info-win64.h4
-rw-r--r--chromium/v8/src/diagnostics/x64/disasm-x64.cc515
-rw-r--r--chromium/v8/src/execution/OWNERS1
-rw-r--r--chromium/v8/src/execution/arguments-inl.h9
-rw-r--r--chromium/v8/src/execution/arguments.h16
-rw-r--r--chromium/v8/src/execution/arm/simulator-arm.cc82
-rw-r--r--chromium/v8/src/execution/arm64/simulator-arm64.cc14
-rw-r--r--chromium/v8/src/execution/execution.cc14
-rw-r--r--chromium/v8/src/execution/frame-constants.h6
-rw-r--r--chromium/v8/src/execution/frames.cc81
-rw-r--r--chromium/v8/src/execution/frames.h14
-rw-r--r--chromium/v8/src/execution/futex-emulation.cc15
-rw-r--r--chromium/v8/src/execution/futex-emulation.h4
-rw-r--r--chromium/v8/src/execution/isolate-data.h272
-rw-r--r--chromium/v8/src/execution/isolate.cc250
-rw-r--r--chromium/v8/src/execution/isolate.h58
-rw-r--r--chromium/v8/src/execution/local-isolate-inl.h5
-rw-r--r--chromium/v8/src/execution/local-isolate.h1
-rw-r--r--chromium/v8/src/execution/loong64/frame-constants-loong64.cc32
-rw-r--r--chromium/v8/src/execution/loong64/frame-constants-loong64.h76
-rw-r--r--chromium/v8/src/execution/loong64/simulator-loong64.cc5538
-rw-r--r--chromium/v8/src/execution/loong64/simulator-loong64.h647
-rw-r--r--chromium/v8/src/execution/messages.cc96
-rw-r--r--chromium/v8/src/execution/messages.h1
-rw-r--r--chromium/v8/src/execution/microtask-queue.h3
-rw-r--r--chromium/v8/src/execution/mips/simulator-mips.cc23
-rw-r--r--chromium/v8/src/execution/mips64/simulator-mips64.cc20
-rw-r--r--chromium/v8/src/execution/mips64/simulator-mips64.h4
-rw-r--r--chromium/v8/src/execution/ppc/simulator-ppc.cc98
-rw-r--r--chromium/v8/src/execution/riscv64/simulator-riscv64.cc2085
-rw-r--r--chromium/v8/src/execution/riscv64/simulator-riscv64.h368
-rw-r--r--chromium/v8/src/execution/runtime-profiler.cc23
-rw-r--r--chromium/v8/src/execution/s390/simulator-s390.cc245
-rw-r--r--chromium/v8/src/execution/s390/simulator-s390.h20
-rw-r--r--chromium/v8/src/execution/simulator-base.h6
-rw-r--r--chromium/v8/src/execution/simulator.h2
-rw-r--r--chromium/v8/src/execution/thread-local-top.h9
-rw-r--r--chromium/v8/src/execution/v8threads.cc11
-rw-r--r--chromium/v8/src/execution/vm-state.h2
-rw-r--r--chromium/v8/src/extensions/cputracemark-extension.cc3
-rw-r--r--chromium/v8/src/extensions/cputracemark-extension.h6
-rw-r--r--chromium/v8/src/extensions/externalize-string-extension.cc1
-rw-r--r--chromium/v8/src/extensions/externalize-string-extension.h6
-rw-r--r--chromium/v8/src/extensions/gc-extension.cc6
-rw-r--r--chromium/v8/src/extensions/gc-extension.h7
-rw-r--r--chromium/v8/src/extensions/ignition-statistics-extension.cc9
-rw-r--r--chromium/v8/src/extensions/ignition-statistics-extension.h6
-rw-r--r--chromium/v8/src/extensions/statistics-extension.cc1
-rw-r--r--chromium/v8/src/extensions/statistics-extension.h6
-rw-r--r--chromium/v8/src/extensions/trigger-failure-extension.cc1
-rw-r--r--chromium/v8/src/extensions/trigger-failure-extension.h6
-rw-r--r--chromium/v8/src/extensions/vtunedomain-support-extension.cc4
-rw-r--r--chromium/v8/src/extensions/vtunedomain-support-extension.h6
-rw-r--r--chromium/v8/src/flags/flag-definitions.h106
-rw-r--r--chromium/v8/src/flags/flags.cc68
-rw-r--r--chromium/v8/src/flags/flags.h11
-rw-r--r--chromium/v8/src/handles/DIR_METADATA4
-rw-r--r--chromium/v8/src/handles/global-handles-inl.h33
-rw-r--r--chromium/v8/src/handles/global-handles.cc2
-rw-r--r--chromium/v8/src/handles/global-handles.h17
-rw-r--r--chromium/v8/src/handles/handles.cc2
-rw-r--r--chromium/v8/src/handles/handles.h4
-rw-r--r--chromium/v8/src/heap/DIR_METADATA4
-rw-r--r--chromium/v8/src/heap/array-buffer-sweeper.cc235
-rw-r--r--chromium/v8/src/heap/array-buffer-sweeper.h118
-rw-r--r--chromium/v8/src/heap/base/asm/loong64/push_registers_asm.cc48
-rw-r--r--chromium/v8/src/heap/base/stack.cc10
-rw-r--r--chromium/v8/src/heap/basic-memory-chunk.cc24
-rw-r--r--chromium/v8/src/heap/basic-memory-chunk.h104
-rw-r--r--chromium/v8/src/heap/concurrent-marking.cc46
-rw-r--r--chromium/v8/src/heap/concurrent-marking.h2
-rw-r--r--chromium/v8/src/heap/cppgc-js/DEPS3
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.cc29
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.h4
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc77
-rw-r--r--chromium/v8/src/heap/cppgc-js/unified-heap-marking-state.h1
-rw-r--r--chromium/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc1
-rw-r--r--chromium/v8/src/heap/cppgc/DEPS3
-rw-r--r--chromium/v8/src/heap/cppgc/allocation.cc14
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap-local-data.cc10
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap.cc37
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap.h20
-rw-r--r--chromium/v8/src/heap/cppgc/gc-info.cc77
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.cc33
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.h15
-rw-r--r--chromium/v8/src/heap/cppgc/heap-object-header.h15
-rw-r--r--chromium/v8/src/heap/cppgc/heap-statistics-collector.cc4
-rw-r--r--chromium/v8/src/heap/cppgc/heap-statistics-collector.h4
-rw-r--r--chromium/v8/src/heap/cppgc/heap.cc14
-rw-r--r--chromium/v8/src/heap/cppgc/marker.cc49
-rw-r--r--chromium/v8/src/heap/cppgc/marker.h3
-rw-r--r--chromium/v8/src/heap/cppgc/marking-verifier.cc12
-rw-r--r--chromium/v8/src/heap/cppgc/marking-verifier.h3
-rw-r--r--chromium/v8/src/heap/cppgc/memory.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/memory.h6
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.cc70
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.h17
-rw-r--r--chromium/v8/src/heap/cppgc/page-memory.cc118
-rw-r--r--chromium/v8/src/heap/cppgc/page-memory.h25
-rw-r--r--chromium/v8/src/heap/cppgc/persistent-node.cc37
-rw-r--r--chromium/v8/src/heap/cppgc/platform.cc44
-rw-r--r--chromium/v8/src/heap/cppgc/platform.h43
-rw-r--r--chromium/v8/src/heap/cppgc/pointer-policies.cc38
-rw-r--r--chromium/v8/src/heap/cppgc/prefinalizer-handler.cc27
-rw-r--r--chromium/v8/src/heap/cppgc/prefinalizer-handler.h9
-rw-r--r--chromium/v8/src/heap/cppgc/stats-collector.cc22
-rw-r--r--chromium/v8/src/heap/cppgc/stats-collector.h7
-rw-r--r--chromium/v8/src/heap/cppgc/sweeper.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/visitor.cc7
-rw-r--r--chromium/v8/src/heap/cppgc/write-barrier.cc4
-rw-r--r--chromium/v8/src/heap/embedder-tracing.h3
-rw-r--r--chromium/v8/src/heap/factory-base.cc12
-rw-r--r--chromium/v8/src/heap/factory-inl.h9
-rw-r--r--chromium/v8/src/heap/factory.cc58
-rw-r--r--chromium/v8/src/heap/factory.h8
-rw-r--r--chromium/v8/src/heap/gc-tracer.cc13
-rw-r--r--chromium/v8/src/heap/gc-tracer.h44
-rw-r--r--chromium/v8/src/heap/heap-inl.h10
-rw-r--r--chromium/v8/src/heap/heap.cc247
-rw-r--r--chromium/v8/src/heap/heap.h33
-rw-r--r--chromium/v8/src/heap/large-spaces.cc40
-rw-r--r--chromium/v8/src/heap/mark-compact-inl.h27
-rw-r--r--chromium/v8/src/heap/mark-compact.cc375
-rw-r--r--chromium/v8/src/heap/mark-compact.h28
-rw-r--r--chromium/v8/src/heap/marking-barrier-inl.h15
-rw-r--r--chromium/v8/src/heap/marking-barrier.cc32
-rw-r--r--chromium/v8/src/heap/marking-barrier.h3
-rw-r--r--chromium/v8/src/heap/marking-visitor-inl.h48
-rw-r--r--chromium/v8/src/heap/marking-visitor.h18
-rw-r--r--chromium/v8/src/heap/memory-chunk-layout.h3
-rw-r--r--chromium/v8/src/heap/memory-chunk.cc12
-rw-r--r--chromium/v8/src/heap/memory-chunk.h36
-rw-r--r--chromium/v8/src/heap/memory-measurement.cc3
-rw-r--r--chromium/v8/src/heap/memory-measurement.h1
-rw-r--r--chromium/v8/src/heap/new-spaces.cc15
-rw-r--r--chromium/v8/src/heap/new-spaces.h2
-rw-r--r--chromium/v8/src/heap/object-stats.cc2
-rw-r--r--chromium/v8/src/heap/objects-visiting-inl.h20
-rw-r--r--chromium/v8/src/heap/objects-visiting.cc6
-rw-r--r--chromium/v8/src/heap/objects-visiting.h10
-rw-r--r--chromium/v8/src/heap/paged-spaces.cc24
-rw-r--r--chromium/v8/src/heap/paged-spaces.h2
-rw-r--r--chromium/v8/src/heap/progress-bar.h61
-rw-r--r--chromium/v8/src/heap/safepoint.cc11
-rw-r--r--chromium/v8/src/heap/scavenger-inl.h8
-rw-r--r--chromium/v8/src/heap/scavenger.cc6
-rw-r--r--chromium/v8/src/heap/setup-heap-internal.cc8
-rw-r--r--chromium/v8/src/heap/spaces.cc5
-rw-r--r--chromium/v8/src/heap/spaces.h10
-rw-r--r--chromium/v8/src/heap/sweeper.cc5
-rw-r--r--chromium/v8/src/heap/third-party/heap-api.h4
-rw-r--r--chromium/v8/src/heap/weak-object-worklists.cc13
-rw-r--r--chromium/v8/src/heap/weak-object-worklists.h4
-rw-r--r--chromium/v8/src/ic/OWNERS1
-rw-r--r--chromium/v8/src/ic/accessor-assembler.cc438
-rw-r--r--chromium/v8/src/ic/handler-configuration-inl.h65
-rw-r--r--chromium/v8/src/ic/handler-configuration.cc8
-rw-r--r--chromium/v8/src/ic/handler-configuration.h4
-rw-r--r--chromium/v8/src/ic/ic.cc28
-rw-r--r--chromium/v8/src/ic/keyed-store-generic.cc13
-rw-r--r--chromium/v8/src/ic/unary-op-assembler.cc6
-rw-r--r--chromium/v8/src/init/bootstrapper.cc92
-rw-r--r--chromium/v8/src/init/bootstrapper.h6
-rw-r--r--chromium/v8/src/init/heap-symbols.h11
-rw-r--r--chromium/v8/src/init/isolate-allocator.cc34
-rw-r--r--chromium/v8/src/init/startup-data-util.cc7
-rw-r--r--chromium/v8/src/init/startup-data-util.h2
-rw-r--r--chromium/v8/src/init/v8.cc30
-rw-r--r--chromium/v8/src/init/v8.h4
-rw-r--r--chromium/v8/src/init/vm-cage.cc97
-rw-r--r--chromium/v8/src/init/vm-cage.h129
-rw-r--r--chromium/v8/src/inspector/DEPS1
-rw-r--r--chromium/v8/src/inspector/custom-preview.cc5
-rw-r--r--chromium/v8/src/inspector/injected-script.cc15
-rw-r--r--chromium/v8/src/inspector/injected-script.h8
-rw-r--r--chromium/v8/src/inspector/inspected-context.cc19
-rw-r--r--chromium/v8/src/inspector/inspected-context.h10
-rw-r--r--chromium/v8/src/inspector/test-interface.h2
-rw-r--r--chromium/v8/src/inspector/v8-console-message.cc4
-rw-r--r--chromium/v8/src/inspector/v8-console-message.h3
-rw-r--r--chromium/v8/src/inspector/v8-console.cc7
-rw-r--r--chromium/v8/src/inspector/v8-console.h9
-rw-r--r--chromium/v8/src/inspector/v8-debugger-agent-impl.cc3
-rw-r--r--chromium/v8/src/inspector/v8-debugger-script.h8
-rw-r--r--chromium/v8/src/inspector/v8-debugger.cc28
-rw-r--r--chromium/v8/src/inspector/v8-debugger.h2
-rw-r--r--chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc1
-rw-r--r--chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h4
-rw-r--r--chromium/v8/src/inspector/v8-inspector-impl.cc54
-rw-r--r--chromium/v8/src/inspector/v8-inspector-impl.h19
-rw-r--r--chromium/v8/src/inspector/v8-profiler-agent-impl.cc120
-rw-r--r--chromium/v8/src/inspector/v8-profiler-agent-impl.h15
-rw-r--r--chromium/v8/src/inspector/v8-regex.cc8
-rw-r--r--chromium/v8/src/inspector/v8-regex.h5
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.cc9
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.h8
-rw-r--r--chromium/v8/src/inspector/v8-stack-trace-impl.cc7
-rw-r--r--chromium/v8/src/inspector/v8-stack-trace-impl.h10
-rw-r--r--chromium/v8/src/inspector/v8-value-utils.cc4
-rw-r--r--chromium/v8/src/inspector/v8-value-utils.h3
-rw-r--r--chromium/v8/src/inspector/value-mirror.cc48
-rw-r--r--chromium/v8/src/inspector/value-mirror.h4
-rw-r--r--chromium/v8/src/interpreter/OWNERS3
-rw-r--r--chromium/v8/src/interpreter/bytecode-generator.cc217
-rw-r--r--chromium/v8/src/interpreter/bytecode-generator.h1
-rw-r--r--chromium/v8/src/interpreter/bytecodes.h4
-rw-r--r--chromium/v8/src/interpreter/interpreter-assembler.cc173
-rw-r--r--chromium/v8/src/interpreter/interpreter-assembler.h53
-rw-r--r--chromium/v8/src/interpreter/interpreter-generator.cc73
-rw-r--r--chromium/v8/src/interpreter/interpreter.cc32
-rw-r--r--chromium/v8/src/interpreter/interpreter.h2
-rw-r--r--chromium/v8/src/json/json-parser.cc15
-rw-r--r--chromium/v8/src/json/json-parser.h1
-rw-r--r--chromium/v8/src/json/json-stringifier.cc9
-rw-r--r--chromium/v8/src/libplatform/default-platform.cc1
-rw-r--r--chromium/v8/src/libsampler/sampler.cc7
-rw-r--r--chromium/v8/src/libsampler/sampler.h6
-rw-r--r--chromium/v8/src/logging/counters-definitions.h4
-rw-r--r--chromium/v8/src/logging/counters.h2
-rw-r--r--chromium/v8/src/logging/log-utils.cc7
-rw-r--r--chromium/v8/src/logging/log.cc29
-rw-r--r--chromium/v8/src/logging/log.h5
-rw-r--r--chromium/v8/src/logging/runtime-call-stats-scope.h6
-rw-r--r--chromium/v8/src/logging/runtime-call-stats.cc33
-rw-r--r--chromium/v8/src/logging/runtime-call-stats.h6
-rw-r--r--chromium/v8/src/numbers/conversions.cc37
-rw-r--r--chromium/v8/src/objects/allocation-site-inl.h30
-rw-r--r--chromium/v8/src/objects/allocation-site.h2
-rw-r--r--chromium/v8/src/objects/api-callbacks.tq3
-rw-r--r--chromium/v8/src/objects/arguments.h18
-rw-r--r--chromium/v8/src/objects/arguments.tq9
-rw-r--r--chromium/v8/src/objects/backing-store.cc184
-rw-r--r--chromium/v8/src/objects/backing-store.h43
-rw-r--r--chromium/v8/src/objects/bigint.cc539
-rw-r--r--chromium/v8/src/objects/bigint.tq3
-rw-r--r--chromium/v8/src/objects/cell-inl.h4
-rw-r--r--chromium/v8/src/objects/cell.h3
-rw-r--r--chromium/v8/src/objects/cell.tq5
-rw-r--r--chromium/v8/src/objects/code-inl.h86
-rw-r--r--chromium/v8/src/objects/code.cc6
-rw-r--r--chromium/v8/src/objects/code.h18
-rw-r--r--chromium/v8/src/objects/contexts.h7
-rw-r--r--chromium/v8/src/objects/contexts.tq11
-rw-r--r--chromium/v8/src/objects/data-handler.h1
-rw-r--r--chromium/v8/src/objects/data-handler.tq8
-rw-r--r--chromium/v8/src/objects/debug-objects.tq3
-rw-r--r--chromium/v8/src/objects/descriptor-array-inl.h4
-rw-r--r--chromium/v8/src/objects/descriptor-array.tq1
-rw-r--r--chromium/v8/src/objects/elements-kind.h17
-rw-r--r--chromium/v8/src/objects/elements.cc14
-rw-r--r--chromium/v8/src/objects/embedder-data-array-inl.h2
-rw-r--r--chromium/v8/src/objects/feedback-cell-inl.h2
-rw-r--r--chromium/v8/src/objects/feedback-vector.cc7
-rw-r--r--chromium/v8/src/objects/fixed-array.tq5
-rw-r--r--chromium/v8/src/objects/heap-object.h1
-rw-r--r--chromium/v8/src/objects/instance-type.h16
-rw-r--r--chromium/v8/src/objects/intl-objects.cc428
-rw-r--r--chromium/v8/src/objects/intl-objects.h106
-rw-r--r--chromium/v8/src/objects/js-array-buffer-inl.h61
-rw-r--r--chromium/v8/src/objects/js-array-buffer.cc29
-rw-r--r--chromium/v8/src/objects/js-array-buffer.h83
-rw-r--r--chromium/v8/src/objects/js-array-inl.h10
-rw-r--r--chromium/v8/src/objects/js-array.h50
-rw-r--r--chromium/v8/src/objects/js-array.tq49
-rw-r--r--chromium/v8/src/objects/js-break-iterator.cc4
-rw-r--r--chromium/v8/src/objects/js-break-iterator.h1
-rw-r--r--chromium/v8/src/objects/js-collator.cc34
-rw-r--r--chromium/v8/src/objects/js-date-time-format-inl.h2
-rw-r--r--chromium/v8/src/objects/js-date-time-format.cc48
-rw-r--r--chromium/v8/src/objects/js-date-time-format.h5
-rw-r--r--chromium/v8/src/objects/js-date-time-format.tq9
-rw-r--r--chromium/v8/src/objects/js-display-names.cc77
-rw-r--r--chromium/v8/src/objects/js-function-inl.h14
-rw-r--r--chromium/v8/src/objects/js-function.cc86
-rw-r--r--chromium/v8/src/objects/js-function.h38
-rw-r--r--chromium/v8/src/objects/js-function.tq13
-rw-r--r--chromium/v8/src/objects/js-list-format.cc16
-rw-r--r--chromium/v8/src/objects/js-locale.cc137
-rw-r--r--chromium/v8/src/objects/js-number-format.cc52
-rw-r--r--chromium/v8/src/objects/js-objects-inl.h17
-rw-r--r--chromium/v8/src/objects/js-objects.cc39
-rw-r--r--chromium/v8/src/objects/js-objects.h54
-rw-r--r--chromium/v8/src/objects/js-objects.tq13
-rw-r--r--chromium/v8/src/objects/js-plural-rules.cc7
-rw-r--r--chromium/v8/src/objects/js-promise.h1
-rw-r--r--chromium/v8/src/objects/js-promise.tq4
-rw-r--r--chromium/v8/src/objects/js-proxy.h9
-rw-r--r--chromium/v8/src/objects/js-proxy.tq1
-rw-r--r--chromium/v8/src/objects/js-regexp-inl.h78
-rw-r--r--chromium/v8/src/objects/js-regexp.cc122
-rw-r--r--chromium/v8/src/objects/js-regexp.h303
-rw-r--r--chromium/v8/src/objects/js-regexp.tq3
-rw-r--r--chromium/v8/src/objects/js-relative-time-format.cc15
-rw-r--r--chromium/v8/src/objects/js-segment-iterator.cc2
-rw-r--r--chromium/v8/src/objects/js-segmenter.cc11
-rw-r--r--chromium/v8/src/objects/js-segments.cc2
-rw-r--r--chromium/v8/src/objects/js-weak-refs-inl.h13
-rw-r--r--chromium/v8/src/objects/js-weak-refs.h23
-rw-r--r--chromium/v8/src/objects/js-weak-refs.tq1
-rw-r--r--chromium/v8/src/objects/keys.cc4
-rw-r--r--chromium/v8/src/objects/keys.h13
-rw-r--r--chromium/v8/src/objects/literal-objects.h1
-rw-r--r--chromium/v8/src/objects/lookup.cc21
-rw-r--r--chromium/v8/src/objects/managed-inl.h64
-rw-r--r--chromium/v8/src/objects/managed.cc2
-rw-r--r--chromium/v8/src/objects/managed.h33
-rw-r--r--chromium/v8/src/objects/map-inl.h35
-rw-r--r--chromium/v8/src/objects/map-updater.cc59
-rw-r--r--chromium/v8/src/objects/map-updater.h2
-rw-r--r--chromium/v8/src/objects/map.cc40
-rw-r--r--chromium/v8/src/objects/map.h9
-rw-r--r--chromium/v8/src/objects/megadom-handler.tq1
-rw-r--r--chromium/v8/src/objects/microtask.h4
-rw-r--r--chromium/v8/src/objects/module.cc39
-rw-r--r--chromium/v8/src/objects/module.h6
-rw-r--r--chromium/v8/src/objects/name.tq10
-rw-r--r--chromium/v8/src/objects/object-list-macros.h1
-rw-r--r--chromium/v8/src/objects/object-macros-undef.h2
-rw-r--r--chromium/v8/src/objects/object-macros.h14
-rw-r--r--chromium/v8/src/objects/objects-body-descriptors-inl.h9
-rw-r--r--chromium/v8/src/objects/objects-definitions.h1
-rw-r--r--chromium/v8/src/objects/objects-inl.h23
-rw-r--r--chromium/v8/src/objects/objects.cc12
-rw-r--r--chromium/v8/src/objects/objects.h27
-rw-r--r--chromium/v8/src/objects/option-utils.cc172
-rw-r--r--chromium/v8/src/objects/option-utils.h95
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.h1
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.tq5
-rw-r--r--chromium/v8/src/objects/promise.h15
-rw-r--r--chromium/v8/src/objects/property-array.h1
-rw-r--r--chromium/v8/src/objects/property-cell-inl.h3
-rw-r--r--chromium/v8/src/objects/property-cell.h1
-rw-r--r--chromium/v8/src/objects/property-descriptor-object.tq1
-rw-r--r--chromium/v8/src/objects/property-descriptor.cc4
-rw-r--r--chromium/v8/src/objects/property-descriptor.h4
-rw-r--r--chromium/v8/src/objects/property-details.h45
-rw-r--r--chromium/v8/src/objects/property.cc10
-rw-r--r--chromium/v8/src/objects/regexp-match-info.h1
-rw-r--r--chromium/v8/src/objects/script.h5
-rw-r--r--chromium/v8/src/objects/shared-function-info-inl.h202
-rw-r--r--chromium/v8/src/objects/shared-function-info.cc20
-rw-r--r--chromium/v8/src/objects/shared-function-info.h43
-rw-r--r--chromium/v8/src/objects/shared-function-info.tq49
-rw-r--r--chromium/v8/src/objects/source-text-module.h1
-rw-r--r--chromium/v8/src/objects/source-text-module.tq1
-rw-r--r--chromium/v8/src/objects/stack-frame-info.cc2
-rw-r--r--chromium/v8/src/objects/stack-frame-info.h1
-rw-r--r--chromium/v8/src/objects/string-inl.h141
-rw-r--r--chromium/v8/src/objects/string-table.cc9
-rw-r--r--chromium/v8/src/objects/string.cc264
-rw-r--r--chromium/v8/src/objects/string.h51
-rw-r--r--chromium/v8/src/objects/string.tq12
-rw-r--r--chromium/v8/src/objects/struct.h4
-rw-r--r--chromium/v8/src/objects/struct.tq2
-rw-r--r--chromium/v8/src/objects/swiss-hash-table-helpers.tq6
-rw-r--r--chromium/v8/src/objects/swiss-name-dictionary.tq29
-rw-r--r--chromium/v8/src/objects/synthetic-module.h1
-rw-r--r--chromium/v8/src/objects/tagged-impl.h1
-rw-r--r--chromium/v8/src/objects/template-objects.tq2
-rw-r--r--chromium/v8/src/objects/templates.tq1
-rw-r--r--chromium/v8/src/objects/transitions-inl.h3
-rw-r--r--chromium/v8/src/objects/transitions.cc3
-rw-r--r--chromium/v8/src/objects/value-serializer.cc14
-rw-r--r--chromium/v8/src/objects/value-serializer.h2
-rw-r--r--chromium/v8/src/objects/visitors-inl.h43
-rw-r--r--chromium/v8/src/objects/visitors.h40
-rw-r--r--chromium/v8/src/parsing/parse-info.h1
-rw-r--r--chromium/v8/src/parsing/parser-base.h135
-rw-r--r--chromium/v8/src/parsing/parser.h15
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.cc80
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.h74
-rw-r--r--chromium/v8/src/parsing/preparse-data.cc10
-rw-r--r--chromium/v8/src/parsing/preparser.h24
-rw-r--r--chromium/v8/src/parsing/scanner-character-streams.cc6
-rw-r--r--chromium/v8/src/parsing/scanner-character-streams.h2
-rw-r--r--chromium/v8/src/parsing/scanner.cc18
-rw-r--r--chromium/v8/src/parsing/scanner.h4
-rw-r--r--chromium/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--chromium/v8/src/profiler/allocation-tracker.h2
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.cc18
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.h1
-rw-r--r--chromium/v8/src/profiler/heap-snapshot-generator.cc53
-rw-r--r--chromium/v8/src/profiler/heap-snapshot-generator.h5
-rw-r--r--chromium/v8/src/profiler/profile-generator.cc49
-rw-r--r--chromium/v8/src/profiler/profile-generator.h4
-rw-r--r--chromium/v8/src/profiler/strings-storage.cc8
-rw-r--r--chromium/v8/src/profiler/strings-storage.h4
-rw-r--r--chromium/v8/src/profiler/tick-sample.cc2
-rw-r--r--chromium/v8/src/profiler/tick-sample.h2
-rw-r--r--chromium/v8/src/profiler/weak-code-registry.cc3
-rw-r--r--chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc214
-rw-r--r--chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h41
-rw-r--r--chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc262
-rw-r--r--chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h60
-rw-r--r--chromium/v8/src/regexp/experimental/experimental-compiler.cc27
-rw-r--r--chromium/v8/src/regexp/experimental/experimental-compiler.h5
-rw-r--r--  chromium/v8/src/regexp/experimental/experimental-interpreter.h | 5
-rw-r--r--  chromium/v8/src/regexp/experimental/experimental.cc | 73
-rw-r--r--  chromium/v8/src/regexp/experimental/experimental.h | 6
-rw-r--r--  chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 213
-rw-r--r--  chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 35
-rw-r--r--  chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc | 1317
-rw-r--r--  chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h | 223
-rw-r--r--  chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 118
-rw-r--r--  chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 41
-rw-r--r--  chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 134
-rw-r--r--  chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 62
-rw-r--r--  chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 210
-rw-r--r--  chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 46
-rw-r--r--  chromium/v8/src/regexp/regexp-ast.h | 16
-rw-r--r--  chromium/v8/src/regexp/regexp-bytecode-generator-inl.h | 25
-rw-r--r--  chromium/v8/src/regexp/regexp-bytecode-generator.cc | 22
-rw-r--r--  chromium/v8/src/regexp/regexp-bytecode-generator.h | 8
-rw-r--r--  chromium/v8/src/regexp/regexp-bytecode-peephole.cc | 4
-rw-r--r--  chromium/v8/src/regexp/regexp-compiler-tonode.cc | 50
-rw-r--r--  chromium/v8/src/regexp/regexp-compiler.cc | 50
-rw-r--r--  chromium/v8/src/regexp/regexp-compiler.h | 41
-rw-r--r--  chromium/v8/src/regexp/regexp-error.h | 5
-rw-r--r--  chromium/v8/src/regexp/regexp-flags.h | 71
-rw-r--r--  chromium/v8/src/regexp/regexp-interpreter.cc | 13
-rw-r--r--  chromium/v8/src/regexp/regexp-interpreter.h | 8
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-arch.h | 2
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc | 4
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler.cc | 55
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler.h | 25
-rw-r--r--  chromium/v8/src/regexp/regexp-nodes.h | 22
-rw-r--r--  chromium/v8/src/regexp/regexp-parser.cc | 1310
-rw-r--r--  chromium/v8/src/regexp/regexp-parser.h | 366
-rw-r--r--  chromium/v8/src/regexp/regexp-stack.cc | 23
-rw-r--r--  chromium/v8/src/regexp/regexp-stack.h | 67
-rw-r--r--  chromium/v8/src/regexp/regexp-utils.cc | 27
-rw-r--r--  chromium/v8/src/regexp/regexp-utils.h | 9
-rw-r--r--  chromium/v8/src/regexp/regexp.cc | 170
-rw-r--r--  chromium/v8/src/regexp/regexp.h | 40
-rw-r--r--  chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc | 151
-rw-r--r--  chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h | 43
-rw-r--r--  chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 207
-rw-r--r--  chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 48
-rw-r--r--  chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 256
-rw-r--r--  chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 50
-rw-r--r--  chromium/v8/src/roots/DIR_METADATA | 4
-rw-r--r--  chromium/v8/src/runtime/runtime-atomics.cc | 4
-rw-r--r--  chromium/v8/src/runtime/runtime-classes.cc | 24
-rw-r--r--  chromium/v8/src/runtime/runtime-collections.cc | 8
-rw-r--r--  chromium/v8/src/runtime/runtime-compiler.cc | 4
-rw-r--r--  chromium/v8/src/runtime/runtime-debug.cc | 13
-rw-r--r--  chromium/v8/src/runtime/runtime-generator.cc | 5
-rw-r--r--  chromium/v8/src/runtime/runtime-internal.cc | 42
-rw-r--r--  chromium/v8/src/runtime/runtime-literals.cc | 5
-rw-r--r--  chromium/v8/src/runtime/runtime-module.cc | 24
-rw-r--r--  chromium/v8/src/runtime/runtime-object.cc | 29
-rw-r--r--  chromium/v8/src/runtime/runtime-regexp.cc | 55
-rw-r--r--  chromium/v8/src/runtime/runtime-scopes.cc | 6
-rw-r--r--  chromium/v8/src/runtime/runtime-test-wasm.cc | 1
-rw-r--r--  chromium/v8/src/runtime/runtime-test.cc | 109
-rw-r--r--  chromium/v8/src/runtime/runtime-typedarray.cc | 2
-rw-r--r--  chromium/v8/src/runtime/runtime-wasm.cc | 14
-rw-r--r--  chromium/v8/src/runtime/runtime.cc | 7
-rw-r--r--  chromium/v8/src/runtime/runtime.h | 7
-rw-r--r--  chromium/v8/src/snapshot/context-deserializer.cc | 1
-rw-r--r--  chromium/v8/src/snapshot/context-serializer.cc | 4
-rw-r--r--  chromium/v8/src/snapshot/deserializer.cc | 6
-rw-r--r--  chromium/v8/src/snapshot/embedded/embedded-data.cc | 4
-rw-r--r--  chromium/v8/src/snapshot/embedded/embedded-empty.cc | 12
-rw-r--r--  chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc | 6
-rw-r--r--  chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc | 11
-rw-r--r--  chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc | 8
-rw-r--r--  chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc | 7
-rw-r--r--  chromium/v8/src/snapshot/mksnapshot.cc | 6
-rw-r--r--  chromium/v8/src/snapshot/serializer.cc | 29
-rw-r--r--  chromium/v8/src/snapshot/serializer.h | 2
-rw-r--r--  chromium/v8/src/snapshot/snapshot.h | 2
-rw-r--r--  chromium/v8/src/strings/string-builder.cc | 2
-rw-r--r--  chromium/v8/src/strings/string-stream.cc | 2
-rw-r--r--  chromium/v8/src/tasks/OWNERS | 1
-rw-r--r--  chromium/v8/src/third_party/vtune/BUILD.gn | 5
-rw-r--r--  chromium/v8/src/third_party/vtune/v8-vtune.h | 2
-rw-r--r--  chromium/v8/src/third_party/vtune/vtune-jit.cc | 8
-rw-r--r--  chromium/v8/src/third_party/vtune/vtune-jit.h | 5
-rw-r--r--  chromium/v8/src/torque/ast.h | 20
-rw-r--r--  chromium/v8/src/torque/cc-generator.cc | 1
-rw-r--r--  chromium/v8/src/torque/constants.h | 30
-rw-r--r--  chromium/v8/src/torque/cpp-builder.cc | 9
-rw-r--r--  chromium/v8/src/torque/cpp-builder.h | 15
-rw-r--r--  chromium/v8/src/torque/csa-generator.cc | 1
-rw-r--r--  chromium/v8/src/torque/declarable.cc | 15
-rw-r--r--  chromium/v8/src/torque/declaration-visitor.cc | 58
-rw-r--r--  chromium/v8/src/torque/declarations.cc | 9
-rw-r--r--  chromium/v8/src/torque/declarations.h | 5
-rw-r--r--  chromium/v8/src/torque/earley-parser.cc | 8
-rw-r--r--  chromium/v8/src/torque/global-context.cc | 4
-rw-r--r--  chromium/v8/src/torque/global-context.h | 3
-rw-r--r--  chromium/v8/src/torque/implementation-visitor.cc | 400
-rw-r--r--  chromium/v8/src/torque/implementation-visitor.h | 18
-rw-r--r--  chromium/v8/src/torque/kythe-data.cc | 187
-rw-r--r--  chromium/v8/src/torque/kythe-data.h | 110
-rw-r--r--  chromium/v8/src/torque/ls/message-handler.cc | 29
-rw-r--r--  chromium/v8/src/torque/source-positions.h | 17
-rw-r--r--  chromium/v8/src/torque/torque-compiler.cc | 37
-rw-r--r--  chromium/v8/src/torque/torque-compiler.h | 14
-rw-r--r--  chromium/v8/src/torque/torque-parser.cc | 149
-rw-r--r--  chromium/v8/src/torque/type-inference.cc | 4
-rw-r--r--  chromium/v8/src/torque/type-visitor.cc | 25
-rw-r--r--  chromium/v8/src/torque/types.cc | 21
-rw-r--r--  chromium/v8/src/torque/types.h | 14
-rw-r--r--  chromium/v8/src/torque/utils.h | 4
-rw-r--r--  chromium/v8/src/trap-handler/handler-inside-posix.cc | 2
-rw-r--r--  chromium/v8/src/trap-handler/handler-inside-win.cc | 54
-rw-r--r--  chromium/v8/src/trap-handler/handler-outside-simulator.cc | 10
-rw-r--r--  chromium/v8/src/trap-handler/trap-handler.h | 5
-rw-r--r--  chromium/v8/src/utils/address-map.h | 1
-rw-r--r--  chromium/v8/src/utils/allocation.cc | 43
-rw-r--r--  chromium/v8/src/utils/allocation.h | 32
-rw-r--r--  chromium/v8/src/utils/utils.h | 3
-rw-r--r--  chromium/v8/src/utils/v8dll-main.cc | 2
-rw-r--r--  chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 28
-rw-r--r--  chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 52
-rw-r--r--  chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 313
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h | 16
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.h | 54
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.cc | 404
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-register.h | 8
-rw-r--r--  chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h | 2817
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 12
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 25
-rw-r--r--  chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 955
-rw-r--r--  chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h | 509
-rw-r--r--  chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 421
-rw-r--r--  chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 341
-rw-r--r--  chromium/v8/src/wasm/c-api.cc | 10
-rw-r--r--  chromium/v8/src/wasm/c-api.h | 3
-rw-r--r--  chromium/v8/src/wasm/code-space-access.cc | 12
-rw-r--r--  chromium/v8/src/wasm/code-space-access.h | 3
-rw-r--r--  chromium/v8/src/wasm/compilation-environment.h | 16
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder-impl.h | 409
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.cc | 11
-rw-r--r--  chromium/v8/src/wasm/function-compiler.cc | 46
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.cc | 373
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.h | 5
-rw-r--r--  chromium/v8/src/wasm/init-expr-interface.cc | 42
-rw-r--r--  chromium/v8/src/wasm/jump-table-assembler.cc | 30
-rw-r--r--  chromium/v8/src/wasm/jump-table-assembler.h | 5
-rw-r--r--  chromium/v8/src/wasm/memory-protection-key.cc | 25
-rw-r--r--  chromium/v8/src/wasm/memory-protection-key.h | 4
-rw-r--r--  chromium/v8/src/wasm/module-compiler.cc | 133
-rw-r--r--  chromium/v8/src/wasm/module-compiler.h | 2
-rw-r--r--  chromium/v8/src/wasm/module-decoder.cc | 160
-rw-r--r--  chromium/v8/src/wasm/module-instantiate.cc | 139
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.cc | 29
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.h | 5
-rw-r--r--  chromium/v8/src/wasm/sync-streaming-decoder.cc | 4
-rw-r--r--  chromium/v8/src/wasm/value-type.h | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.cc | 202
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.h | 103
-rw-r--r--  chromium/v8/src/wasm/wasm-constants.h | 28
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.cc | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.cc | 19
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.h | 1
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.cc | 62
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-feature-flags.h | 41
-rw-r--r--  chromium/v8/src/wasm/wasm-init-expr.cc | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-init-expr.h | 42
-rw-r--r--  chromium/v8/src/wasm/wasm-js.cc | 285
-rw-r--r--  chromium/v8/src/wasm/wasm-limits.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-linkage.h | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-module-builder.cc | 255
-rw-r--r--  chromium/v8/src/wasm/wasm-module-builder.h | 122
-rw-r--r--  chromium/v8/src/wasm/wasm-module-sourcemap.cc | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-module-sourcemap.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-module.cc | 29
-rw-r--r--  chromium/v8/src/wasm/wasm-module.h | 32
-rw-r--r--  chromium/v8/src/wasm/wasm-objects-inl.h | 54
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.cc | 45
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.h | 39
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes-inl.h | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.h | 13
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.cc | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.cc | 45
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.h | 14
-rw-r--r--  chromium/v8/src/web-snapshot/web-snapshot.cc | 42
-rw-r--r--  chromium/v8/src/zone/accounting-allocator.cc | 13
-rw-r--r--  chromium/v8/src/zone/accounting-allocator.h | 4
-rw-r--r--  chromium/v8/src/zone/zone.cc | 60
-rw-r--r--  chromium/v8/src/zone/zone.h | 28
-rw-r--r--  chromium/v8/test/cctest/BUILD.gn | 13
-rw-r--r--  chromium/v8/test/torque/test-torque.tq | 184
-rw-r--r--  chromium/v8/test/unittests/BUILD.gn | 32
-rw-r--r--  chromium/v8/testing/gtest-support.h | 13
-rw-r--r--  chromium/v8/third_party/v8/builtins/array-sort.tq | 145
-rw-r--r--  chromium/v8/tools/OWNERS | 2
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/build_db.js | 3
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/db.js | 30
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/exceptions.js | 26
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js | 5
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js | 27
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js (renamed from chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js) | 0
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js (renamed from chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js) | 0
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js | 4
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js | 6
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js | 2
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js | 2
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js | 7
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js | 7
-rw-r--r--  chromium/v8/tools/clusterfuzz/js_fuzzer/test_db.js | 5
-rw-r--r--  chromium/v8/tools/clusterfuzz/v8_commands.py | 3
-rwxr-xr-x  chromium/v8/tools/clusterfuzz/v8_foozzie.py | 18
-rw-r--r--  chromium/v8/tools/clusterfuzz/v8_smoke_tests.js | 2
-rwxr-xr-x  chromium/v8/tools/cppgc/gen_cmake.py | 12
-rwxr-xr-x  chromium/v8/tools/cppgc/test_cmake.sh | 2
-rwxr-xr-x  chromium/v8/tools/dev/gm.py | 23
-rw-r--r--  chromium/v8/tools/gen-postmortem-metadata.py | 21
-rwxr-xr-x  chromium/v8/tools/generate-header-include-checks.py | 4
-rwxr-xr-x  chromium/v8/tools/mb/mb.py | 66
-rwxr-xr-x  chromium/v8/tools/mb/mb_unittest.py | 22
-rw-r--r--  chromium/v8/tools/profile.mjs | 5
-rw-r--r--  chromium/v8/tools/release/PRESUBMIT.py | 8
-rwxr-xr-x  chromium/v8/tools/release/auto_tag.py | 2
-rwxr-xr-x  chromium/v8/tools/release/check_clusterfuzz.py | 2
-rw-r--r--  chromium/v8/tools/release/common_includes.py | 26
-rwxr-xr-x  chromium/v8/tools/release/create_release.py | 22
-rwxr-xr-x  chromium/v8/tools/release/list_deprecated.py | 234
-rwxr-xr-x  chromium/v8/tools/release/merge_to_branch.py | 4
-rwxr-xr-x  chromium/v8/tools/release/mergeinfo.py | 10
-rwxr-xr-x  chromium/v8/tools/release/roll_merge.py | 4
-rwxr-xr-x  chromium/v8/tools/release/search_related_commits.py | 2
-rwxr-xr-x  chromium/v8/tools/release/test_mergeinfo.py | 10
-rwxr-xr-x  chromium/v8/tools/release/test_scripts.py | 44
-rwxr-xr-x  chromium/v8/tools/release/test_search_related_commits.py | 38
-rw-r--r--  chromium/v8/tools/run_perf.py | 34
-rw-r--r--  chromium/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs | 3
-rw-r--r--  chromium/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs | 2
-rw-r--r--  chromium/v8/tools/testrunner/base_runner.py | 31
-rw-r--r--  chromium/v8/tools/testrunner/local/android.py | 6
-rw-r--r--  chromium/v8/tools/testrunner/local/statusfile.py | 14
-rw-r--r--  chromium/v8/tools/testrunner/local/testsuite.py | 2
-rw-r--r--  chromium/v8/tools/testrunner/local/utils.py | 2
-rw-r--r--  chromium/v8/tools/testrunner/local/variants.py | 9
-rwxr-xr-x  chromium/v8/tools/testrunner/num_fuzzer.py | 17
-rw-r--r--  chromium/v8/tools/testrunner/objects/testcase.py | 42
-rw-r--r--  chromium/v8/tools/testrunner/outproc/base.py | 2
-rwxr-xr-x  chromium/v8/tools/testrunner/standard_runner.py | 9
-rw-r--r--  chromium/v8/tools/testrunner/testproc/expectation.py | 5
-rw-r--r--  chromium/v8/tools/testrunner/testproc/filter.py | 2
-rw-r--r--  chromium/v8/tools/testrunner/testproc/fuzzer.py | 6
-rwxr-xr-x  chromium/v8/tools/unittests/run_tests_test.py | 3
-rw-r--r--  chromium/v8/tools/v8heapconst.py | 557
-rwxr-xr-x  chromium/v8/tools/wasm/update-wasm-spec-tests.sh | 20
-rw-r--r--  chromium/v8/tools/whitespace.txt | 9
1198 files changed, 90078 insertions, 42365 deletions
diff --git a/chromium/v8/.flake8 b/chromium/v8/.flake8
index c58d00ca051..22eebf3de49 100644
--- a/chromium/v8/.flake8
+++ b/chromium/v8/.flake8
@@ -4,7 +4,6 @@ exclude =
./third_party/, # third-party code
./build/, # third-party code
./buildtools/, # third-party code
- ./tools/swarming_client/, # third-party code
./test/wasm-js/, # third-party code
./test/wasm-js/data/, # third-party code
./test/test262/data/, # third-party code
diff --git a/chromium/v8/.vpython b/chromium/v8/.vpython
index 3b7cb32468e..d4a07677ca9 100644
--- a/chromium/v8/.vpython
+++ b/chromium/v8/.vpython
@@ -24,6 +24,21 @@
python_version: "2.7"
+# The default set of platforms vpython checks does not yet include mac-arm64.
+# Setting `verify_pep425_tag` to the list of platforms we explicitly must support
+# allows us to ensure that vpython specs stay mac-arm64-friendly
+verify_pep425_tag: [
+ {python: "cp27", abi: "cp27mu", platform: "manylinux1_x86_64"},
+ {python: "cp27", abi: "cp27mu", platform: "linux_arm64"},
+ {python: "cp27", abi: "cp27mu", platform: "linux_armv6l"},
+
+ {python: "cp27", abi: "cp27m", platform: "macosx_10_10_intel"},
+ {python: "cp27", abi: "cp27m", platform: "macosx_11_0_arm64"},
+
+ {python: "cp27", abi: "cp27m", platform: "win32"},
+ {python: "cp27", abi: "cp27m", platform: "win_amd64"}
+]
+
# Needed by third_party/catapult/devil/devil, which is imported by
# build/android/test_runner.py when running performance tests.
wheel: <
diff --git a/chromium/v8/AUTHORS b/chromium/v8/AUTHORS
index d9eb05985c5..7307ced9fc2 100644
--- a/chromium/v8/AUTHORS
+++ b/chromium/v8/AUTHORS
@@ -57,13 +57,12 @@ Alexis Campailla <alexis@janeasystems.com>
Allan Sandfeld Jensen <allan.jensen@qt.io>
Amos Lim <eui-sang.lim@samsung.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
-Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
+Andrew Paprocki <andrew@ishiboo.com>
Anna Henningsen <anna@addaleax.net>
Antoine du Hamel <duhamelantoine1995@gmail.com>
Anton Bikineev <ant.bikineev@gmail.com>
Bangfu Tao <bangfu.tao@samsung.com>
-Daniel Shelton <d1.shelton@samsung.com>
Ben Coe <bencoe@gmail.com>
Ben Newman <ben@meteor.com>
Ben Noordhuis <info@bnoordhuis.nl>
@@ -74,7 +73,6 @@ Brice Dobry <brice.dobry@futurewei.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Chao Wang <chao.w@rioslab.org>
-Craig Schlenter <craig.schlenter@gmail.com>
Charles Kerr <charles@charleskerr.com>
Chengzhong Wu <legendecas@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
@@ -82,10 +80,13 @@ Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Colin Ihrig <cjihrig@gmail.com>
Cong Zuo <zckevinzc@gmail.com>
+Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Dromboski <dandromb@gmail.com>
Daniel James <dnljms@gmail.com>
+Daniel Shelton <d1.shelton@samsung.com>
+Darshan Sen <raisinten@gmail.com>
David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
@@ -119,13 +120,13 @@ Ingvar Stepanyan <me@rreverser.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jaime Bernardo <jaime@janeasystems.com>
-Jan de Mooij <jandemooij@gmail.com>
+James M Snell <jasnell@gmail.com>
+James Pike <g00gle@chilon.net>
Jan Krems <jan.krems@gmail.com>
+Jan de Mooij <jandemooij@gmail.com>
Janusz Majnert <jmajnert@gmail.com>
-Jay Freeman <saurik@saurik.com>
-James Pike <g00gle@chilon.net>
-James M Snell <jasnell@gmail.com>
Javad Amiri <javad.amiri@anu.edu.au>
+Jay Freeman <saurik@saurik.com>
Jesper van den Ende <jespertheend@gmail.com>
Ji Qiu <qiuji@iscas.ac.cn>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
@@ -135,8 +136,8 @@ Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
Julien Brianceau <jbriance@cisco.com>
-Junha Park <jpark3@scu.edu>
JunHo Seo <sejunho@gmail.com>
+Junha Park <jpark3@scu.edu>
Junming Huang <kiminghjm@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
@@ -172,6 +173,7 @@ Milton Chiang <milton.chiang@mediatek.com>
Mu Tao <pamilty@gmail.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
+Nicolò Ribaudo <nicolo.ribaudo@gmail.com>
Niek van der Maas <mail@niekvandermaas.nl>
Niklas Hambüchen <mail@nh2.me>
Noj Vek <nojvek@gmail.com>
@@ -179,20 +181,21 @@ Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
Oliver Dunk <oliver@oliverdunk.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
+Paul Lind <plind44@gmail.com>
+Pavel Medvedev <pmedvedev@gmail.com>
Peng Fei <pfgenyun@gmail.com>
Peng Wu <peng.w@rioslab.org>
Peng-Yu Chen <pengyu@libstarrify.so>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
-Paul Lind <plind44@gmail.com>
-Pavel Medvedev <pmedvedev@gmail.com>
PhistucK <phistuck@gmail.com>
Qingyan Li <qingyan.liqy@alibaba-inc.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
Raul Tambre <raul@tambre.ee>
Ray Glover <ray@rayglover.net>
+Ray Wang <ray@isrc.iscas.ac.cn>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
Reza Yazdani <ryazdani@futurewei.com>
@@ -217,11 +220,14 @@ Stefan Penner <stefan.penner@gmail.com>
Stephan Hartmann <stha09@googlemail.com>
Stephen Belanger <stephen.belanger@datadoghq.com>
Sylvestre Ledru <sledru@mozilla.com>
+Takeshi Yoneda <takeshi@tetrate.io>
Taketoshi Aono <brn@b6n.ch>
Tao Liqiang <taolq@outlook.com>
Teddy Katz <teddy.katz@gmail.com>
Thomas Young <wenzhang5800@gmail.com>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
+Tianping Yang <yangtianping@oppo.com>
+Timo Teräs <timo.teras@iki.fi>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Ujjwal Sharma <usharma1998@gmail.com>
@@ -245,13 +251,11 @@ Yi Wang <wangyi8848@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Youfeng Hao <ajihyf@gmail.com>
Yu Yin <xwafish@gmail.com>
-Yusif Khudhur <yusif.khudhur@gmail.com>
Yuri Iozzelli <yuri@leaningtech.com>
+Yusif Khudhur <yusif.khudhur@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zeynep Cankara <zeynepcankara402@gmail.com>
Zhao Jiazhong <kyslie3100@gmail.com>
Zheng Liu <i6122f@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
-Tianping Yang <yangtianping@oppo.com>
-Takeshi Yoneda <takeshi@tetrate.io>
diff --git a/chromium/v8/BUILD.bazel b/chromium/v8/BUILD.bazel
index c5b4a94f911..23bce0f4bdc 100644
--- a/chromium/v8/BUILD.bazel
+++ b/chromium/v8/BUILD.bazel
@@ -150,7 +150,6 @@ config_setting(
# v8_can_use_fpu_instructions
# v8_use_mips_abi_hardfloat
# v8_enable_gdbjit
-# v8_untrusted_code_mitigations
# v8_enable_minor_mc
# v8_check_header_includes
# v8_enable_shared_ro_heap
@@ -164,10 +163,10 @@ config_setting(
# v8_verify_torque_generation_invariance
# v8_enable_snapshot_compression
# v8_control_flow_integrity
-# cppgc_enable_object_names
+# v8_enable_virtual_memory_cage
# cppgc_enable_caged_heap
-# cppgc_enable_verify_live_bytes
-# cppgc_enable_check_assignments_in_prefinalizers
+# cppgc_enable_object_names
+# cppgc_enable_verify_heap
# cppgc_enable_young_generation
# v8_enable_zone_compression
# v8_enable_heap_sandbox
@@ -306,9 +305,6 @@ v8_config(
"V8_TARGET_OS_MACOSX",
],
}) + select({
- ":is_android_x86": [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ],
- "//conditions:default": [],
- }) + select({
":is_v8_enable_pointer_compression": [
"V8_COMPRESS_POINTERS",
"V8_31BIT_SMIS_ON_64BIT_ARCH",
@@ -403,11 +399,53 @@ filegroup(
srcs = [
":cppgc_headers_files",
":v8_version_files",
+ "include/v8-array-buffer.h",
+ "include/v8-callbacks.h",
+ "include/v8-container.h",
+ "include/v8-context.h",
"include/v8-cppgc.h",
+ "include/v8-data.h",
+ "include/v8-date.h",
+ "include/v8-debug.h",
+ "include/v8-embedder-heap.h",
+ "include/v8-exception.h",
+ "include/v8-extension.h",
+ "include/v8-external.h",
"include/v8-fast-api-calls.h",
+ "include/v8-forward.h",
+ "include/v8-function.h",
+ "include/v8-function-callback.h",
+ "include/v8-initialization.h",
"include/v8-internal.h",
+ "include/v8-isolate.h",
+ "include/v8-json.h",
+ "include/v8-local-handle.h",
+ "include/v8-locker.h",
+ "include/v8-maybe.h",
+ "include/v8-memory-span.h",
+ "include/v8-message.h",
+ "include/v8-microtask-queue.h",
+ "include/v8-microtask.h",
+ "include/v8-object.h",
+ "include/v8-persistent-handle.h",
+ "include/v8-primitive-object.h",
+ "include/v8-primitive.h",
"include/v8-profiler.h",
+ "include/v8-promise.h",
+ "include/v8-proxy.h",
+ "include/v8-regexp.h",
+ "include/v8-script.h",
+ "include/v8-snapshot.h",
+ "include/v8-statistics.h",
+ "include/v8-template.h",
+ "include/v8-traced-handle.h",
+ "include/v8-typed-array.h",
+ "include/v8-unwinder.h",
"include/v8-util.h",
+ "include/v8-value-serializer.h",
+ "include/v8-value.h",
+ "include/v8-wasm.h",
+ "include/v8-weak-callback-info.h",
"include/v8.h",
],
)
@@ -855,6 +893,8 @@ filegroup(
"src/torque/instance-type-generator.cc",
"src/torque/instructions.cc",
"src/torque/instructions.h",
+ "src/torque/kythe-data.cc",
+ "src/torque/kythe-data.h",
"src/torque/parameter-difference.h",
"src/torque/server-data.cc",
"src/torque/server-data.h",
@@ -975,6 +1015,7 @@ filegroup(
"src/codegen/assembler-inl.h",
"src/codegen/assembler.cc",
"src/codegen/assembler.h",
+ "src/codegen/atomic-memory-order.h",
"src/codegen/bailout-reason.cc",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
@@ -1178,6 +1219,7 @@ filegroup(
"src/flags/flag-definitions.h",
"src/flags/flags.cc",
"src/flags/flags.h",
+ "src/handles/global-handles-inl.h",
"src/handles/global-handles.cc",
"src/handles/global-handles.h",
"src/handles/handles-inl.h",
@@ -1309,6 +1351,7 @@ filegroup(
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/parked-scope.h",
+ "src/heap/progress-bar.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
@@ -1361,6 +1404,8 @@ filegroup(
"src/init/startup-data-util.h",
"src/init/v8.cc",
"src/init/v8.h",
+ "src/init/vm-cage.cc",
+ "src/init/vm-cage.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
@@ -1545,6 +1590,7 @@ filegroup(
"src/objects/lookup-inl.h",
"src/objects/lookup.cc",
"src/objects/lookup.h",
+ "src/objects/managed-inl.h",
"src/objects/managed.cc",
"src/objects/managed.h",
"src/objects/map-inl.h",
@@ -1576,6 +1622,8 @@ filegroup(
"src/objects/objects-definitions.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
+ "src/objects/option-utils.h",
+ "src/objects/option-utils.cc",
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.cc",
"src/objects/ordered-hash-table.h",
@@ -1665,6 +1713,7 @@ filegroup(
"src/objects/value-serializer.cc",
"src/objects/value-serializer.h",
"src/objects/visitors.cc",
+ "src/objects/visitors-inl.h",
"src/objects/visitors.h",
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.cc",
@@ -1755,6 +1804,7 @@ filegroup(
"src/regexp/regexp-dotprinter.h",
"src/regexp/regexp-error.cc",
"src/regexp/regexp-error.h",
+ "src/regexp/regexp-flags.h",
"src/regexp/regexp-interpreter.cc",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
@@ -1810,6 +1860,7 @@ filegroup(
"src/base/sanitizer/lsan-page-allocator.cc",
"src/base/sanitizer/lsan-page-allocator.h",
"src/base/sanitizer/msan.h",
+ "src/base/sanitizer/tsan.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/context-deserializer.cc",
@@ -2092,6 +2143,7 @@ filegroup(
"src/asmjs/asm-types.h",
"src/compiler/int64-lowering.h",
"src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-inlining.h",
"src/debug/debug-wasm-objects.cc",
"src/debug/debug-wasm-objects.h",
"src/debug/debug-wasm-objects-inl.h",
@@ -2298,7 +2350,6 @@ filegroup(
"src/compiler/common-operator-reducer.h",
"src/compiler/compilation-dependencies.cc",
"src/compiler/compilation-dependencies.h",
- "src/compiler/compilation-dependency.h",
"src/compiler/compiler-source-position-table.cc",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.cc",
@@ -2475,6 +2526,7 @@ filegroup(
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-inlining.cc",
],
"//conditions:default": [],
}),
@@ -2570,6 +2622,7 @@ filegroup(
name = "cppgc_base_files",
srcs = [
"src/heap/cppgc/allocation.cc",
+ "src/heap/cppgc/caged-heap.h",
"src/heap/cppgc/compaction-worklists.cc",
"src/heap/cppgc/compaction-worklists.h",
"src/heap/cppgc/compactor.cc",
@@ -2631,6 +2684,7 @@ filegroup(
"src/heap/cppgc/page-memory.h",
"src/heap/cppgc/persistent-node.cc",
"src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/platform.h",
"src/heap/cppgc/pointer-policies.cc",
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
@@ -2679,6 +2733,7 @@ filegroup(
"src/bigint/bigint-internal.cc",
"src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
+ "src/bigint/bitwise.cc",
"src/bigint/digit-arithmetic.h",
"src/bigint/div-barrett.cc",
"src/bigint/div-burnikel.cc",
@@ -2861,11 +2916,11 @@ v8_torque(
"exported-macros-assembler.h",
"factory.cc",
"factory.inc",
- "field-offsets.h",
"instance-types.h",
"interface-descriptors.inc",
"objects-body-descriptors-inl.inc",
"objects-printer.cc",
+ "visitor-lists.h",
],
args = select({
":is_v8_annotate_torque_ir": [ "-annotate-ir" ],
diff --git a/chromium/v8/BUILD.gn b/chromium/v8/BUILD.gn
index 3e48fb11bff..f491f2a4e64 100644
--- a/chromium/v8/BUILD.gn
+++ b/chromium/v8/BUILD.gn
@@ -41,7 +41,7 @@ declare_args() {
v8_enable_future = false
# Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing
- v8_enable_system_instrumentation = is_win || is_mac
+ v8_enable_system_instrumentation = (is_win || is_mac) && !v8_use_perfetto
# Sets the GUID for the ETW provider
v8_etw_guid = ""
@@ -228,11 +228,6 @@ declare_args() {
(is_linux || is_chromeos || is_mac)) ||
(v8_current_cpu == "ppc64" && (is_linux || is_chromeos))
- # Enable mitigations for executing untrusted code.
- # Disabled by default on ia32 due to conflicting requirements with embedded
- # builtins.
- v8_untrusted_code_mitigations = false
-
# Enable minor mark compact.
v8_enable_minor_mc = true
@@ -291,15 +286,15 @@ declare_args() {
cppgc_enable_object_names = false
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
- cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64"
+ cppgc_enable_caged_heap =
+ v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+ v8_current_cpu == "loong64"
- # Enable verification of live bytes in the marking verifier.
- # TODO(v8:11785): Enable by default when running with the verifier.
- cppgc_enable_verify_live_bytes = false
+ # Enables additional heap verification phases and checks.
+ cppgc_enable_verify_heap = ""
- # Enable assignment checks for Members/Persistents during prefinalizer invocations.
- # TODO(v8:11749): Enable by default after fixing any existing issues in Blink.
- cppgc_enable_check_assignments_in_prefinalizers = false
+ # Enable allocations during prefinalizer invocations.
+ cppgc_allow_allocations_in_prefinalizers = false
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
@@ -312,6 +307,11 @@ declare_args() {
# Sets -DV8_HEAP_SANDBOX.
v8_enable_heap_sandbox = ""
+ # Enable the Virtual Memory Cage, which contains the pointer compression cage
+ # as well as ArrayBuffer BackingStores and WASM memory cages.
+ # Sets -DV8_VIRTUAL_MEMORY_CAGE.
+ v8_enable_virtual_memory_cage = ""
+
# Experimental feature for collecting per-class zone memory stats.
# Requires use_rtti = true
v8_enable_precise_zone_stats = false
@@ -342,9 +342,20 @@ declare_args() {
# Enable global allocation site tracking.
v8_allocation_site_tracking = true
+
+ # If enabled, the receiver is always included in the actual and formal
+ # parameter count of function with JS linkage.
+ # TODO(v8:11112): Remove once all architectures support the flag and it is
+ # enabled unconditionally.
+ v8_include_receiver_in_argc =
+ v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
+ v8_current_cpu == "arm" || v8_current_cpu == "arm64"
}
# Derived defaults.
+if (cppgc_enable_verify_heap == "") {
+ cppgc_enable_verify_heap = v8_enable_debugging_features || dcheck_always_on
+}
if (v8_enable_verify_heap == "") {
v8_enable_verify_heap = v8_enable_debugging_features
}
@@ -392,6 +403,9 @@ if (v8_enable_zone_compression == "") {
if (v8_enable_heap_sandbox == "") {
v8_enable_heap_sandbox = false
}
+if (v8_enable_virtual_memory_cage == "") {
+ v8_enable_virtual_memory_cage = v8_enable_heap_sandbox
+}
if (v8_enable_short_builtin_calls == "") {
v8_enable_short_builtin_calls =
v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
@@ -458,12 +472,16 @@ if (build_with_chromium && v8_current_cpu == "arm64" &&
v8_control_flow_integrity = true
}
+# Enable the virtual memory cage on 64-bit Chromium builds.
+if (build_with_chromium &&
+ (v8_current_cpu == "arm64" || v8_current_cpu == "x64")) {
+ # The cage is incompatible with lsan.
+ v8_enable_virtual_memory_cage = !is_lsan
+}
+
assert(!v8_disable_write_barriers || v8_enable_single_generation,
"Disabling write barriers works only with single generation")
-assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
- "Untrusted code mitigations are unsupported on ia32")
-
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
@@ -480,15 +498,22 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
assert(!v8_enable_map_packing || v8_current_cpu == "x64",
"Map packing is only supported on x64")
-assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
- "Control-flow integrity does not support multisnapshots")
-
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
assert(!v8_enable_heap_sandbox || !v8_enable_external_code_space,
"V8 Heap Sandbox is not compatible with external code space YET")
+assert(!v8_enable_heap_sandbox || v8_enable_virtual_memory_cage,
+ "The Heap Sandbox requires the virtual memory cage")
+
+assert(
+ !v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage,
+ "V8 VirtualMemoryCage requires the shared pointer compression cage")
+
+assert(!v8_enable_virtual_memory_cage || !is_lsan,
+ "V8 VirtualMemoryCage is currently incompatible with Leak Sanitizer")
+
assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
@@ -502,7 +527,7 @@ assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
- v8_current_cpu == "arm64",
+ v8_current_cpu == "arm64" || v8_current_cpu == "loong64",
"CppGC caged heap requires 64bit platforms")
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
@@ -650,6 +675,7 @@ external_v8_defines = [
"V8_31BIT_SMIS_ON_64BIT_ARCH",
"V8_COMPRESS_ZONES",
"V8_HEAP_SANDBOX",
+ "V8_VIRTUAL_MEMORY_CAGE",
"V8_DEPRECATION_WARNINGS",
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
@@ -680,6 +706,9 @@ if (v8_enable_zone_compression) {
if (v8_enable_heap_sandbox) {
enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ]
}
+if (v8_enable_virtual_memory_cage) {
+ enabled_external_v8_defines += [ "V8_VIRTUAL_MEMORY_CAGE" ]
+}
if (v8_deprecation_warnings) {
enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ]
}
@@ -761,12 +790,12 @@ config("features") {
":cppgc_header_features",
]
- if (cppgc_enable_verify_live_bytes) {
- defines += [ "CPPGC_VERIFY_LIVE_BYTES" ]
+ if (cppgc_enable_verify_heap) {
+ defines += [ "CPPGC_VERIFY_HEAP" ]
}
- if (cppgc_enable_check_assignments_in_prefinalizers) {
- defines += [ "CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS" ]
+ if (cppgc_allow_allocations_in_prefinalizers) {
+ defines += [ "CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS" ]
}
if (v8_embedder_string != "") {
@@ -872,9 +901,6 @@ config("features") {
if (v8_enable_lazy_source_positions) {
defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
}
- if (v8_use_multi_snapshots) {
- defines += [ "V8_MULTI_SNAPSHOTS" ]
- }
if (v8_use_siphash) {
defines += [ "V8_USE_SIPHASH" ]
}
@@ -935,6 +961,9 @@ config("features") {
if (v8_advanced_bigint_algorithms) {
defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
}
+ if (v8_include_receiver_in_argc) {
+ defines += [ "V8_INCLUDE_RECEIVER_IN_ARGC" ]
+ }
}
config("toolchain") {
@@ -1057,6 +1086,15 @@ config("toolchain") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
}
+
+ # loong64 simulators.
+ if (target_is_simulator && v8_current_cpu == "loong64") {
+ defines += [ "_LOONG64_TARGET_SIMULATOR" ]
+ }
+ if (v8_current_cpu == "loong64") {
+ defines += [ "V8_TARGET_ARCH_LOONG64" ]
+ }
+
if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390" ]
cflags += [ "-ffp-contract=off" ]
@@ -1170,10 +1208,6 @@ config("toolchain") {
defines += [ "V8_RUNTIME_CALL_STATS" ]
}
- if (!v8_untrusted_code_mitigations) {
- defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
- }
-
if (v8_no_inline) {
if (is_win) {
cflags += [ "/Ob0" ]
@@ -1186,7 +1220,12 @@ config("toolchain") {
}
if (is_clang) {
- cflags += [ "-Wmissing-field-initializers" ]
+ cflags += [
+ "-Wmissing-field-initializers",
+
+ # TODO(v8:12245): Fix shadowing instances and remove.
+ "-Wno-shadow",
+ ]
if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
# We exclude MIPS because the IsMipsArchVariant macro causes trouble.
@@ -1222,7 +1261,144 @@ config("toolchain") {
}
if (!is_clang && is_win) {
- cflags += [ "/wd4506" ] # Benign "no definition for inline function"
+ cflags += [
+ "/wd4506", # Benign "no definition for inline function"
+
+ # Warnings permanently disabled:
+
+ # C4091: 'typedef ': ignored on left of 'X' when no variable is
+ # declared.
+ # This happens in a number of Windows headers. Dumb.
+ "/wd4091",
+
+ # C4127: conditional expression is constant
+ # This warning can in theory catch dead code and other problems, but
+ # triggers in far too many desirable cases where the conditional
+ # expression is either set by macros or corresponds some legitimate
+ # compile-time constant expression (due to constant template args,
+ # conditionals comparing the sizes of different types, etc.). Some of
+ # these can be worked around, but it's not worth it.
+ "/wd4127",
+
+ # C4251: 'identifier' : class 'type' needs to have dll-interface to be
+ # used by clients of class 'type2'
+ # This is necessary for the shared library build.
+ "/wd4251",
+
+ # C4275: non dll-interface class used as base for dll-interface class
+ # This points out a potential (but rare) problem with referencing static
+ # fields of a non-exported base, through the base's non-exported inline
+ # functions, or directly. The warning is subtle enough that people just
+ # suppressed it when they saw it, so it's not worth it.
+ "/wd4275",
+
+ # C4312 is a VS 2015 64-bit warning for integer to larger pointer.
+ # TODO(brucedawson): fix warnings, crbug.com/554200
+ "/wd4312",
+
+ # C4324 warns when padding is added to fulfill alignas requirements,
+ # but can trigger in benign cases that are difficult to individually
+ # suppress.
+ "/wd4324",
+
+ # C4351: new behavior: elements of array 'array' will be default
+ # initialized
+ # This is a silly "warning" that basically just alerts you that the
+ # compiler is going to actually follow the language spec like it's
+ # supposed to, instead of not following it like old buggy versions did.
+ # There's absolutely no reason to turn this on.
+ "/wd4351",
+
+ # C4355: 'this': used in base member initializer list
+ # It's commonly useful to pass |this| to objects in a class' initializer
+ # list. While this warning can catch real bugs, most of the time the
+ # constructors in question don't attempt to call methods on the passed-in
+ # pointer (until later), and annotating every legit usage of this is
+ # simply more hassle than the warning is worth.
+ "/wd4355",
+
+ # C4503: 'identifier': decorated name length exceeded, name was
+ # truncated
+ # This only means that some long error messages might have truncated
+ # identifiers in the presence of lots of templates. It has no effect on
+ # program correctness and there's no real reason to waste time trying to
+ # prevent it.
+ "/wd4503",
+
+ # Warning C4589 says: "Constructor of abstract class ignores
+ # initializer for virtual base class." Disable this warning because it
+ # is flaky in VS 2015 RTM. It triggers on compiler generated
+ # copy-constructors in some cases.
+ "/wd4589",
+
+ # C4611: interaction between 'function' and C++ object destruction is
+ # non-portable
+ # This warning is unavoidable when using e.g. setjmp/longjmp. MSDN
+ # suggests using exceptions instead of setjmp/longjmp for C++, but
+ # Chromium code compiles without exception support. We therefore have to
+ # use setjmp/longjmp for e.g. JPEG decode error handling, which means we
+ # have to turn off this warning (and be careful about how object
+ # destruction happens in such cases).
+ "/wd4611",
+
+ # Warnings to evaluate and possibly fix/reenable later:
+
+ "/wd4100", # Unreferenced formal function parameter.
+ "/wd4121", # Alignment of a member was sensitive to packing.
+ "/wd4244", # Conversion: possible loss of data.
+ "/wd4505", # Unreferenced local function has been removed.
+ "/wd4510", # Default constructor could not be generated.
+ "/wd4512", # Assignment operator could not be generated.
+ "/wd4610", # Class can never be instantiated, constructor required.
+ "/wd4838", # Narrowing conversion. Doesn't seem to be very useful.
+ "/wd4995", # 'X': name was marked as #pragma deprecated
+ "/wd4996", # Deprecated function warning.
+
+ # These are variable shadowing warnings that are new in VS2015. We
+ # should work through these at some point -- they may be removed from
+ # the RTM release in the /W4 set.
+ "/wd4456",
+ "/wd4457",
+ "/wd4458",
+ "/wd4459",
+
+ # All of our compilers support the extensions below.
+ "/wd4200", # nonstandard extension used: zero-sized array in struct/union
+ "/wd4201", # nonstandard extension used: nameless struct/union
+ "/wd4204", # nonstandard extension used : non-constant aggregate
+ # initializer
+
+ "/wd4221", # nonstandard extension used : 'identifier' : cannot be
+ # initialized using address of automatic variable
+
+ # http://crbug.com/588506 - Conversion suppressions waiting on Clang
+ # -Wconversion.
+ "/wd4245", # 'conversion' : conversion from 'type1' to 'type2',
+ # signed/unsigned mismatch
+
+ "/wd4267", # 'var' : conversion from 'size_t' to 'type', possible loss of
+ # data
+
+ "/wd4305", # 'identifier' : truncation from 'type1' to 'type2'
+ "/wd4389", # 'operator' : signed/unsigned mismatch
+
+ "/wd4702", # unreachable code
+
+ # http://crbug.com/848979 - MSVC is more conservative than Clang with
+ # regards to variables initialized and consumed in different branches.
+ "/wd4701", # Potentially uninitialized local variable 'name' used
+ "/wd4703", # Potentially uninitialized local pointer variable 'name' used
+
+ # http://crbug.com/848979 - Remaining Clang permitted warnings.
+ "/wd4661", # 'identifier' : no suitable definition provided for explicit
+ # template instantiation request
+
+ "/wd4706", # assignment within conditional expression
+ # MSVC is stricter and requires a boolean expression.
+
+ "/wd4715", # 'function' : not all control paths return a value'
+ # MSVC does not analyze switch (enum) for completeness.
+ ]
}
if (!is_clang && !is_win) {
@@ -1309,8 +1485,6 @@ template("asm_to_inline_asm") {
if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
- # We don't support side-by-side snapshots on Android within Chromium.
- assert(!v8_use_multi_snapshots)
deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86" ||
@@ -1671,11 +1845,11 @@ template("run_torque") {
"$destination_folder/exported-macros-assembler.h",
"$destination_folder/factory.cc",
"$destination_folder/factory.inc",
- "$destination_folder/field-offsets.h",
"$destination_folder/instance-types.h",
"$destination_folder/interface-descriptors.inc",
"$destination_folder/objects-body-descriptors-inl.inc",
"$destination_folder/objects-printer.cc",
+ "$destination_folder/visitor-lists.h",
]
foreach(file, torque_files) {
@@ -1987,17 +2161,6 @@ if (emit_builtins_as_inline_asm) {
args = []
}
}
-if (v8_use_multi_snapshots) {
- run_mksnapshot("trusted") {
- args = [ "--no-untrusted-code-mitigations" ]
- embedded_variant = "Trusted"
- }
- if (emit_builtins_as_inline_asm) {
- asm_to_inline_asm("trusted") {
- args = []
- }
- }
-}
action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
@@ -2034,6 +2197,7 @@ action("v8_dump_build_config") {
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
"v8_enable_pointer_compression_shared_cage=" +
"$v8_enable_pointer_compression_shared_cage",
+ "v8_enable_virtual_memory_cage=$v8_enable_virtual_memory_cage",
"v8_enable_third_party_heap=$v8_enable_third_party_heap",
"v8_enable_webassembly=$v8_enable_webassembly",
"v8_control_flow_integrity=$v8_control_flow_integrity",
@@ -2086,16 +2250,6 @@ v8_source_set("v8_snapshot") {
deps += [ ":v8_base" ]
sources += [ "src/snapshot/snapshot-external.cc" ]
-
- if (v8_use_multi_snapshots) {
- public_deps += [ ":run_mksnapshot_trusted" ]
- if (emit_builtins_as_inline_asm) {
- deps += [ ":asm_to_inline_asm_trusted" ]
- sources += [ "$target_gen_dir/embedded_trusted.cc" ]
- } else {
- sources += [ "$target_gen_dir/embedded_trusted.S" ]
- }
- }
} else {
# Also top-level visibility targets can depend on this.
visibility += [ "//:gn_visibility" ]
@@ -2230,6 +2384,11 @@ v8_source_set("v8_initializers") {
### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [
+ ### gcmole(arch:loong64) ###
+ "src/builtins/loong64/builtins-loong64.cc",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [
### gcmole(arch:ppc) ###
@@ -2313,11 +2472,53 @@ v8_header_set("v8_headers") {
public_configs = [ ":headers_config" ]
sources = [
+ "include/v8-array-buffer.h",
+ "include/v8-callbacks.h",
+ "include/v8-container.h",
+ "include/v8-context.h",
"include/v8-cppgc.h",
+ "include/v8-data.h",
+ "include/v8-date.h",
+ "include/v8-debug.h",
+ "include/v8-embedder-heap.h",
+ "include/v8-exception.h",
+ "include/v8-extension.h",
+ "include/v8-external.h",
"include/v8-fast-api-calls.h",
+ "include/v8-forward.h",
+ "include/v8-function-callback.h",
+ "include/v8-function.h",
+ "include/v8-initialization.h",
"include/v8-internal.h",
+ "include/v8-isolate.h",
+ "include/v8-json.h",
+ "include/v8-local-handle.h",
+ "include/v8-locker.h",
+ "include/v8-maybe.h",
+ "include/v8-memory-span.h",
+ "include/v8-message.h",
+ "include/v8-microtask-queue.h",
+ "include/v8-microtask.h",
+ "include/v8-object.h",
+ "include/v8-persistent-handle.h",
+ "include/v8-primitive-object.h",
+ "include/v8-primitive.h",
"include/v8-profiler.h",
+ "include/v8-promise.h",
+ "include/v8-proxy.h",
+ "include/v8-regexp.h",
+ "include/v8-script.h",
+ "include/v8-snapshot.h",
+ "include/v8-statistics.h",
+ "include/v8-template.h",
+ "include/v8-traced-handle.h",
+ "include/v8-typed-array.h",
+ "include/v8-unwinder.h",
"include/v8-util.h",
+ "include/v8-value-serializer.h",
+ "include/v8-value.h",
+ "include/v8-wasm.h",
+ "include/v8-weak-callback-info.h",
"include/v8.h",
]
@@ -2450,6 +2651,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/assembler-arch.h",
"src/codegen/assembler-inl.h",
"src/codegen/assembler.h",
+ "src/codegen/atomic-memory-order.h",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
"src/codegen/code-comments.h",
@@ -2532,7 +2734,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.h",
"src/compiler/compilation-dependencies.h",
- "src/compiler/compilation-dependency.h",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.h",
"src/compiler/control-equivalence.h",
@@ -2693,6 +2894,7 @@ v8_header_set("v8_internal_headers") {
"src/extensions/ignition-statistics-extension.h",
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.h",
+ "src/handles/global-handles-inl.h",
"src/handles/global-handles.h",
"src/handles/handles-inl.h",
"src/handles/handles.h",
@@ -2775,6 +2977,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/parked-scope.h",
+ "src/heap/progress-bar.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.h",
"src/heap/read-only-spaces.h",
@@ -2806,6 +3009,7 @@ v8_header_set("v8_internal_headers") {
"src/init/setup-isolate.h",
"src/init/startup-data-util.h",
"src/init/v8.h",
+ "src/init/vm-cage.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.h",
@@ -2944,6 +3148,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/lookup-cache.h",
"src/objects/lookup-inl.h",
"src/objects/lookup.h",
+ "src/objects/managed-inl.h",
"src/objects/managed.h",
"src/objects/map-inl.h",
"src/objects/map-updater.h",
@@ -2969,6 +3174,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/objects.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
+ "src/objects/option-utils.h",
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.h",
"src/objects/osr-optimized-code-cache-inl.h",
@@ -3037,6 +3243,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/transitions.h",
"src/objects/type-hints.h",
"src/objects/value-serializer.h",
+ "src/objects/visitors-inl.h",
"src/objects/visitors.h",
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.h",
@@ -3088,6 +3295,7 @@ v8_header_set("v8_internal_headers") {
"src/regexp/regexp-compiler.h",
"src/regexp/regexp-dotprinter.h",
"src/regexp/regexp-error.h",
+ "src/regexp/regexp-flags.h",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
"src/regexp/regexp-macro-assembler-tracer.h",
@@ -3188,6 +3396,7 @@ v8_header_set("v8_internal_headers") {
"src/asmjs/asm-types.h",
"src/compiler/int64-lowering.h",
"src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-inlining.h",
"src/debug/debug-wasm-objects-inl.h",
"src/debug/debug-wasm-objects.h",
"src/trap-handler/trap-handler-internal.h",
@@ -3405,7 +3614,8 @@ v8_header_set("v8_internal_headers") {
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [ "src/trap-handler/handler-inside-posix.h" ]
}
- if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ if (current_cpu == "x64" &&
+ (is_linux || is_chromeos || is_mac || is_win)) {
sources += [ "src/trap-handler/trap-handler-simulator.h" ]
}
}
@@ -3442,6 +3652,21 @@ v8_header_set("v8_internal_headers") {
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "src/baseline/loong64/baseline-assembler-loong64-inl.h",
+ "src/baseline/loong64/baseline-compiler-loong64-inl.h",
+ "src/codegen/loong64/assembler-loong64-inl.h",
+ "src/codegen/loong64/assembler-loong64.h",
+ "src/codegen/loong64/constants-loong64.h",
+ "src/codegen/loong64/macro-assembler-loong64.h",
+ "src/codegen/loong64/register-loong64.h",
+ "src/compiler/backend/loong64/instruction-codes-loong64.h",
+ "src/execution/loong64/frame-constants-loong64.h",
+ "src/execution/loong64/simulator-loong64.h",
+ "src/regexp/loong64/regexp-macro-assembler-loong64.h",
+ "src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
@@ -3639,6 +3864,7 @@ if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
"src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-inlining.cc",
]
}
@@ -3923,6 +4149,7 @@ v8_source_set("v8_base_without_compiler") {
"src/init/isolate-allocator.cc",
"src/init/startup-data-util.cc",
"src/init/v8.cc",
+ "src/init/vm-cage.cc",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-random-iterator.cc",
@@ -3993,6 +4220,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/module.cc",
"src/objects/object-type.cc",
"src/objects/objects.cc",
+ "src/objects/option-utils.cc",
"src/objects/ordered-hash-table.cc",
"src/objects/osr-optimized-code-cache.cc",
"src/objects/property-descriptor.cc",
@@ -4309,16 +4537,22 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
]
if (v8_enable_webassembly) {
- # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
- # and Mac.
+ # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux,
+ # Mac, and Windows.
if ((current_cpu == "arm64" && is_mac) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [
"src/trap-handler/handler-inside-posix.cc",
"src/trap-handler/handler-outside-posix.cc",
]
+ } else if (current_cpu == "x64" && is_win) {
+ sources += [
+ "src/trap-handler/handler-inside-win.cc",
+ "src/trap-handler/handler-outside-win.cc",
+ ]
}
- if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ if (current_cpu == "x64" &&
+ (is_linux || is_chromeos || is_mac || is_win)) {
sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
}
}
@@ -4359,6 +4593,23 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/mips64/simulator-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "src/codegen/loong64/assembler-loong64.cc",
+ "src/codegen/loong64/constants-loong64.cc",
+ "src/codegen/loong64/cpu-loong64.cc",
+ "src/codegen/loong64/interface-descriptors-loong64-inl.h",
+ "src/codegen/loong64/macro-assembler-loong64.cc",
+ "src/compiler/backend/loong64/code-generator-loong64.cc",
+ "src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
+ "src/compiler/backend/loong64/instruction-selector-loong64.cc",
+ "src/deoptimizer/loong64/deoptimizer-loong64.cc",
+ "src/diagnostics/loong64/disasm-loong64.cc",
+ "src/diagnostics/loong64/unwinder-loong64.cc",
+ "src/execution/loong64/frame-constants-loong64.cc",
+ "src/execution/loong64/simulator-loong64.cc",
+ "src/regexp/loong64/regexp-macro-assembler-loong64.cc",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc.cc",
@@ -4579,6 +4830,8 @@ v8_source_set("torque_base") {
"src/torque/instance-type-generator.cc",
"src/torque/instructions.cc",
"src/torque/instructions.h",
+ "src/torque/kythe-data.cc",
+ "src/torque/kythe-data.h",
"src/torque/parameter-difference.h",
"src/torque/server-data.cc",
"src/torque/server-data.h",
@@ -4757,6 +5010,7 @@ v8_component("v8_libbase") {
"src/base/sanitizer/lsan-page-allocator.h",
"src/base/sanitizer/lsan.h",
"src/base/sanitizer/msan.h",
+ "src/base/sanitizer/tsan.h",
"src/base/small-vector.h",
"src/base/strings.cc",
"src/base/strings.h",
@@ -5009,6 +5263,7 @@ v8_source_set("v8_bigint") {
"src/bigint/bigint-internal.cc",
"src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
+ "src/bigint/bitwise.cc",
"src/bigint/digit-arithmetic.h",
"src/bigint/div-burnikel.cc",
"src/bigint/div-helpers.cc",
@@ -5060,6 +5315,8 @@ v8_source_set("v8_cppgc_shared") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
+ } else if (current_cpu == "loong64") {
+ sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64") {
sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
}
@@ -5093,6 +5350,7 @@ v8_header_set("cppgc_headers") {
sources = [
"include/cppgc/allocation.h",
"include/cppgc/common.h",
+ "include/cppgc/cross-thread-persistent.h",
"include/cppgc/custom-space.h",
"include/cppgc/default-platform.h",
"include/cppgc/ephemeron-pair.h",
@@ -5211,6 +5469,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/page-memory.h",
"src/heap/cppgc/persistent-node.cc",
"src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/platform.h",
"src/heap/cppgc/pointer-policies.cc",
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
@@ -5285,10 +5544,12 @@ if (v8_check_header_includes) {
":torque_ls_base",
":v8_base_without_compiler",
":v8_bigint",
+ ":v8_headers",
":v8_initializers",
":v8_internal_headers",
":v8_libbase",
":v8_maybe_icu",
+ ":v8_version",
":wee8",
"src/inspector:inspector",
"src/inspector:inspector_string_conversions",
@@ -5854,8 +6115,8 @@ if (want_v8_shell) {
}
}
-v8_executable("cppgc_sample") {
- sources = [ "samples/cppgc/cppgc-sample.cc" ]
+v8_executable("cppgc_hello_world") {
+ sources = [ "samples/cppgc/hello-world.cc" ]
if (v8_current_cpu == "riscv64") {
libs = [ "atomic" ]
diff --git a/chromium/v8/COMMON_OWNERS b/chromium/v8/COMMON_OWNERS
index 69222f9843c..dc831c0e977 100644
--- a/chromium/v8/COMMON_OWNERS
+++ b/chromium/v8/COMMON_OWNERS
@@ -24,14 +24,11 @@ marja@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
neis@chromium.org
nicohartmann@chromium.org
omerkatz@chromium.org
pthier@chromium.org
-rmcilroy@chromium.org
sigurds@chromium.org
-solanes@chromium.org
syg@chromium.org
szuend@chromium.org
thibaudm@chromium.org
diff --git a/chromium/v8/DEPS b/chromium/v8/DEPS
index 439f45ca583..587b7e53759 100644
--- a/chromium/v8/DEPS
+++ b/chromium/v8/DEPS
@@ -46,13 +46,13 @@ vars = {
'checkout_reclient': False,
# reclient CIPD package version
- 'reclient_version': 're_client_version:0.33.0.3e223d5',
+ 'reclient_version': 're_client_version:0.40.0.40ff5a5',
# GN CIPD package version.
- 'gn_version': 'git_revision:eea3906f0e2a8d3622080127d2005ff214d51383',
+ 'gn_version': 'git_revision:0153d369bbccc908f4da4993b1ba82728055926a',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:1120f810b7ab7eb71bd618c4c57fe82a60d4f2fe',
+ 'luci_go': 'git_revision:a373a19da0fbbbe81b2b684e3797260294393e40',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -73,7 +73,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
- 'android_sdk_platform-tools_version': 'qi_k82nm6j9nz4dQosOoqXew4_TFAy8rcGOHDLptx1sC',
+ 'android_sdk_platform-tools_version': 'g7n_-r6yJd_SGRklujGB1wEt8iyr77FZTUJVS9w6O34C',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
@@ -85,16 +85,16 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'ZT3JmI6GMG4YVcZ1OtECRVMOLLJAWAdPbi-OclubJLMC',
+ 'android_sdk_cmdline-tools_version': 'AuYa11pULKT8AI14_owabJrkZoRGuovL-nvwmiONlYEC',
}
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '3da1e2fcf66acd5c7194497b4285ac163f32e239',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68d816952258c9d817bba656ee2664b35507f01b',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'bbf7f0ed65548c4df862d2a2748e3a9b908a3217',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'ebad8533842661f66b9b905e0ee9890a32f628d5',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '37dc929ecb351687006a61744b116cda601753d7',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a9bc3e283182a586998338a665c7eae17406ec54',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -120,9 +120,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '24e92c2beed59b76ddabe7ceb5ee4b40f09e0712',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '9959b06ccd7291269796e85c7c8f7b432af414bd',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'b825591df326b2725e6b88bdf74fdc88fefdf460',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a002c725cf03e16d3bc47dd9b7962aa22f7ee1d9',
'buildtools/win': {
'packages': [
{
@@ -148,14 +148,14 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ab353c6e732b9e175d3ad6779e3acf3ea82d3761',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '50dd431dffe5cf86e9064a652d6b01dbbe542cf0',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/aemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'qWiGSH8A_xdaUVO-GsDJsJ5HCkIRwZqb-HDyxsLiuWwC'
+ 'version': 'FAd7QuRV-mCjbKgg2SO4BBlRCvGIsI672THjo3tEIZAC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -176,7 +176,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e98c753917587d320f4e7a24f1c7474535adac3f',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '7a11b799efba1cd679b4f5d14889465e9e1fb1f4',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -218,7 +218,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'abc7ba7d871fe3c25b0a1bec7fc84fb309034cb7',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'c0b9d253fbf9a729be51d3890fa78be4b5eb3352',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -226,20 +226,20 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '49a703f3d915b140c9f373107e1ba17f30e2487d',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0e2fb336b2e7ddbbb9c5ab70eab25f82f55dff2b',
'third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4124223bf5303d1d65fe2c40f33e28372bbb986c',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '0baacde3618ca617da95375e0af13ce1baadea47',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '47f819c3ca54fb602f432904443e00a0a1fe2f42',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '3b49be074d5c1340eeb447e6a8e78427051e675a',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '75e34bcccea0be165c31fdb278b3712c516c5876',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '3f443830bd52d3aa5fab3c1aa2b6d0848bb5039d',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '9a8087bbbf43a355950fc1667575d1a753f8aaa4',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '5df06a49fc485f3371e8ca2f4957dac4840ba3bb',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
@@ -247,7 +247,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
- Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '7c54c1f227727e0c4c1d3dc19dd71cd601a2db95',
+ Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '6db8da1615a13fdfab925688bc4bf2eb394a73af',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/logdog/logdog':
@@ -283,9 +283,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '563140dd9c24f84bf40919196e9e7666d351cc0d',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'dfa96e81458fb3b39676e45f7e9e000dff789b05',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '6a8e571efd68de48d226950d1e10cb8982e71496',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c06edd1f455183fc89e9f8c2cf745db8f564d8ea',
'tools/clang/dsymutil': {
'packages': [
{
@@ -314,8 +314,6 @@ deps = {
'condition': 'host_cpu != "s390" and host_os != "aix"',
'dep_type': 'cipd',
},
- 'tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'a32a1607f6093d338f756c7e7c7b4333b0c50c9c',
}
include_rules = [
@@ -323,7 +321,18 @@ include_rules = [
'+include',
'+unicode',
'+third_party/fdlibm',
- '+third_party/ittapi/include'
+ '+third_party/ittapi/include',
+ # Abseil features are allow-listed. Please use your best judgement when adding
+ # to this set -- if in doubt, email v8-dev@. For general guidance, refer to
+ # the Chromium guidelines (though note that some requirements in V8 may be
+ # different to Chromium's):
+ # https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++11.md
+ '+absl/types/optional.h',
+ '+absl/types/variant.h',
+ '+absl/status',
+ # Some abseil features are explicitly banned.
+ '-absl/types/any.h', # Requires RTTI.
+ '-absl/types/flags', # Requires RTTI.
]
# checkdeps.py shouldn't check for includes in these directories:
@@ -485,7 +494,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1',
],
},
{
@@ -496,7 +505,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1',
],
},
{
diff --git a/chromium/v8/ENG_REVIEW_OWNERS b/chromium/v8/ENG_REVIEW_OWNERS
index 173f6d6aeee..3943c49432c 100644
--- a/chromium/v8/ENG_REVIEW_OWNERS
+++ b/chromium/v8/ENG_REVIEW_OWNERS
@@ -5,4 +5,4 @@
adamk@chromium.org
danno@chromium.org
hpayer@chromium.org
-rmcilroy@chromium.org
+verwaest@chromium.org
diff --git a/chromium/v8/LOONG_OWNERS b/chromium/v8/LOONG_OWNERS
new file mode 100644
index 00000000000..cda25c27005
--- /dev/null
+++ b/chromium/v8/LOONG_OWNERS
@@ -0,0 +1,3 @@
+liuyu@loongson.cn
+yuyin-hf@loongson.cn
+zhaojiazhong-hf@loongson.cn
diff --git a/chromium/v8/MIPS_OWNERS b/chromium/v8/MIPS_OWNERS
index 6c65e34e9c3..fc3d3e43968 100644
--- a/chromium/v8/MIPS_OWNERS
+++ b/chromium/v8/MIPS_OWNERS
@@ -1,2 +1,3 @@
xwafish@gmail.com
zhaojiazhong-hf@loongson.cn
+liuyu@loongson.cn
diff --git a/chromium/v8/OWNERS b/chromium/v8/OWNERS
index 4fcf830fcc2..7174da6f15a 100644
--- a/chromium/v8/OWNERS
+++ b/chromium/v8/OWNERS
@@ -31,6 +31,7 @@ per-file WATCHLISTS=file:COMMON_OWNERS
per-file WATCHLISTS=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
per-file DEPS=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+per-file ...-loong64*=file:LOONG_OWNERS
per-file ...-mips*=file:MIPS_OWNERS
per-file ...-mips64*=file:MIPS_OWNERS
per-file ...-ppc*=file:PPC_OWNERS
diff --git a/chromium/v8/RISCV_OWNERS b/chromium/v8/RISCV_OWNERS
index 8f8e15a40a0..e3e11fdf494 100644
--- a/chromium/v8/RISCV_OWNERS
+++ b/chromium/v8/RISCV_OWNERS
@@ -1,3 +1,4 @@
brice.dobry@futurewei.com
peng.w@rioslab.org
qiuji@iscas.ac.cn
+yahan@iscas.ac.cn
diff --git a/chromium/v8/WATCHLISTS b/chromium/v8/WATCHLISTS
index b8b7eac99a2..ad065a98428 100644
--- a/chromium/v8/WATCHLISTS
+++ b/chromium/v8/WATCHLISTS
@@ -104,6 +104,12 @@
'|WORKSPACE' \
'|bazel/',
},
+ 'cppgc': {
+ 'filepath': 'src/heap/cppgc/' \
+ '|src/heap/cppgc-js/' \
+ '|include/cppgc/' \
+ '|test/unittests/heap/',
+ },
},
'WATCHLISTS': {
@@ -119,9 +125,6 @@
'devtools': [
'devtools-reviews+v8@chromium.org',
],
- 'interpreter': [
- 'rmcilroy@chromium.org',
- ],
'baseline': [
'leszeks+watch@chromium.org',
'verwaest+watch@chromium.org',
@@ -169,5 +172,8 @@
'api': [
'cbruni+watch@chromium.org',
],
+ 'cppgc': [
+ 'oilpan-reviews+v8@chromium.org',
+ ],
},
}
diff --git a/chromium/v8/gni/snapshot_toolchain.gni b/chromium/v8/gni/snapshot_toolchain.gni
index e855b88e430..feabd079e00 100644
--- a/chromium/v8/gni/snapshot_toolchain.gni
+++ b/chromium/v8/gni/snapshot_toolchain.gni
@@ -84,7 +84,7 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
- v8_current_cpu == "riscv64") {
+ v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
if (is_win && v8_current_cpu == "arm64") {
# set _cpus to blank for Windows ARM64 so host_toolchain could be
# selected as snapshot toolchain later.
diff --git a/chromium/v8/gni/v8.gni b/chromium/v8/gni/v8.gni
index a3346517978..fe445307f92 100644
--- a/chromium/v8/gni/v8.gni
+++ b/chromium/v8/gni/v8.gni
@@ -35,9 +35,6 @@ declare_args() {
# as an argument to profiler's method `takeHeapSnapshot`.
v8_enable_raw_heap_snapshots = false
- # Enable several snapshots side-by-side (e.g. default and for trusted code).
- v8_use_multi_snapshots = false
-
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
@@ -99,13 +96,6 @@ if (v8_use_external_startup_data == "") {
v8_use_external_startup_data = !is_ios
}
-if (v8_use_multi_snapshots) {
- # Silently disable multi snapshots if they're incompatible with the current
- # build configuration. This allows us to set v8_use_multi_snapshots=true on
- # all bots, and e.g. no-snapshot bots will automatically do the right thing.
- v8_use_multi_snapshots = v8_use_external_startup_data && !build_with_chromium
-}
-
if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}
diff --git a/chromium/v8/include/cppgc/README.md b/chromium/v8/include/cppgc/README.md
index 3a2db6dfa97..e454399853a 100644
--- a/chromium/v8/include/cppgc/README.md
+++ b/chromium/v8/include/cppgc/README.md
@@ -1,5 +1,16 @@
-# C++ Garbage Collection
+# Oilpan: C++ Garbage Collection
-This directory provides an open-source garbage collection library for C++.
+Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector.
-The library is under construction, meaning that *all APIs in this directory are incomplete and considered unstable and should not be used*. \ No newline at end of file
+**Key properties**
+- Trace-based garbage collection;
+- Precise on-heap memory layout;
+- Conservative on-stack memory layout;
+- Allows for collection with and without considering stack;
+- Incremental and concurrent marking;
+- Incremental and concurrent sweeping;
+- Non-incremental and non-concurrent compaction for selected spaces;
+
+See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example for how to get started using Oilpan to manage C++ code.
+
+Oilpan follows V8's project organization; see e.g. how we accept [contributions](https://v8.dev/docs/contribute) and how we [provide a stable API](https://v8.dev/docs/api).
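For orientation, a minimal, illustrative sketch of managing a C++ object with Oilpan. It assumes a cppgc::AllocationHandle obtained from an already-created cppgc::Heap and elides platform setup:

#include <cppgc/allocation.h>
#include <cppgc/garbage-collected.h>
#include <cppgc/member.h>
#include <cppgc/visitor.h>

// A garbage-collected node holding a traced reference to another node.
class Node final : public cppgc::GarbageCollected<Node> {
 public:
  explicit Node(int value) : value_(value) {}
  void set_next(Node* next) { next_ = next; }
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

 private:
  cppgc::Member<Node> next_;  // traced, on-heap reference
  int value_;
};

// `handle` is assumed to come from an existing heap, e.g.
// cppgc::Heap::Create(platform)->GetAllocationHandle().
Node* MakeChain(cppgc::AllocationHandle& handle) {
  Node* first = cppgc::MakeGarbageCollected<Node>(handle, 1);
  first->set_next(cppgc::MakeGarbageCollected<Node>(handle, 2));
  return first;  // stays alive as long as it is reachable from roots
}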
diff --git a/chromium/v8/include/cppgc/allocation.h b/chromium/v8/include/cppgc/allocation.h
index d75f1a97296..a3112dd61fb 100644
--- a/chromium/v8/include/cppgc/allocation.h
+++ b/chromium/v8/include/cppgc/allocation.h
@@ -36,8 +36,13 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal {
const_cast<uint16_t*>(reinterpret_cast<const uint16_t*>(
reinterpret_cast<const uint8_t*>(payload) -
api_constants::kFullyConstructedBitFieldOffsetFromPayload)));
- atomic_mutable_bitfield->fetch_or(api_constants::kFullyConstructedBitMask,
- std::memory_order_release);
+ // It's safe to use a split load+store here (instead of a read-modify-write
+ // operation), since it's guaranteed that this 16-bit bitfield is only
+ // modified by a single thread. This is cheaper in terms of code bloat (on
+ // ARM) and performance.
+ uint16_t value = atomic_mutable_bitfield->load(std::memory_order_relaxed);
+ value |= api_constants::kFullyConstructedBitMask;
+ atomic_mutable_bitfield->store(value, std::memory_order_release);
}
template <typename U, typename CustomSpace>
@@ -202,7 +207,7 @@ struct PostConstructionCallbackTrait {
* \returns an instance of type T.
*/
template <typename T, typename... Args>
-T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
T* object =
MakeGarbageCollectedTrait<T>::Call(handle, std::forward<Args>(args)...);
PostConstructionCallbackTrait<T>::Call(object);
@@ -220,8 +225,9 @@ T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
* \returns an instance of type T.
*/
template <typename T, typename... Args>
-T* MakeGarbageCollected(AllocationHandle& handle,
- AdditionalBytes additional_bytes, Args&&... args) {
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle,
+ AdditionalBytes additional_bytes,
+ Args&&... args) {
T* object = MakeGarbageCollectedTrait<T>::Call(handle, additional_bytes,
std::forward<Args>(args)...);
PostConstructionCallbackTrait<T>::Call(object);
diff --git a/chromium/v8/include/cppgc/cross-thread-persistent.h b/chromium/v8/include/cppgc/cross-thread-persistent.h
index 0a9afdcd2bd..c8751e1d641 100644
--- a/chromium/v8/include/cppgc/cross-thread-persistent.h
+++ b/chromium/v8/include/cppgc/cross-thread-persistent.h
@@ -34,7 +34,35 @@ class CrossThreadPersistentBase : public PersistentBase {
V8_CLANG_NO_SANITIZE("address")
void ClearFromGC() const {
raw_ = nullptr;
- node_ = nullptr;
+ SetNodeSafe(nullptr);
+ }
+
+ // GetNodeSafe() can be used for a thread-safe IsValid() check in a
+ // double-checked locking pattern. See ~BasicCrossThreadPersistent.
+ PersistentNode* GetNodeSafe() const {
+ return reinterpret_cast<std::atomic<PersistentNode*>*>(&node_)->load(
+ std::memory_order_acquire);
+ }
+
+ // The GC writes using SetNodeSafe() while holding the lock.
+ V8_CLANG_NO_SANITIZE("address")
+ void SetNodeSafe(PersistentNode* value) const {
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_IS_ASAN 1
+#endif
+#endif
+
+#ifdef V8_IS_ASAN
+ __atomic_store(&node_, &value, __ATOMIC_RELEASE);
+#else // !V8_IS_ASAN
+ // Non-ASAN builds can use atomics. This also covers MSVC which does not
+ // have the __atomic_store intrinsic.
+ reinterpret_cast<std::atomic<PersistentNode*>*>(&node_)->store(
+ value, std::memory_order_release);
+#endif // !V8_IS_ASAN
+
+#undef V8_IS_ASAN
}
};
@@ -48,7 +76,31 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
using typename WeaknessPolicy::IsStrongPersistent;
using PointeeType = T;
- ~BasicCrossThreadPersistent() { Clear(); }
+ ~BasicCrossThreadPersistent() {
+ // This implements the fast path for destroying empty/sentinel handles.
+ //
+ // Simplified version of `AssignUnsafe()` to allow calling without a
+ // complete type `T`. Uses double-checked locking with a simple thread-safe
+ // check for a valid handle based on a node.
+ if (GetNodeSafe()) {
+ PersistentRegionLock guard;
+ const void* old_value = GetValue();
+ // The fast path check (GetNodeSafe()) does not acquire the lock. Recheck
+ // validity while holding the lock to ensure the reference has not been
+ // cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
+ }
+ }
+ // No need to call SetValue() as the handle is not used anymore. This can
+ // leave behind stale sentinel values but will always destroy the underlying
+ // node.
+ }
BasicCrossThreadPersistent(
const SourceLocation& loc = SourceLocation::Current())
@@ -135,7 +187,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
BasicCrossThreadPersistent& operator=(
const BasicCrossThreadPersistent& other) {
PersistentRegionLock guard;
- AssignUnsafe(other.Get());
+ AssignSafe(guard, other.Get());
return *this;
}
@@ -147,7 +199,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
OtherLocationPolicy,
OtherCheckingPolicy>& other) {
PersistentRegionLock guard;
- AssignUnsafe(other.Get());
+ AssignSafe(guard, other.Get());
return *this;
}
@@ -165,8 +217,13 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
return *this;
}
+ /**
+ * Assigns a raw pointer.
+ *
+ * Note: **Not thread-safe.**
+ */
BasicCrossThreadPersistent& operator=(T* other) {
- Assign(other);
+ AssignUnsafe(other);
return *this;
}
@@ -181,13 +238,24 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
return operator=(member.Get());
}
+ /**
+ * Assigns a nullptr.
+ *
+ * \returns the handle.
+ */
BasicCrossThreadPersistent& operator=(std::nullptr_t) {
Clear();
return *this;
}
+ /**
+ * Assigns the sentinel pointer.
+ *
+ * \returns the handle.
+ */
BasicCrossThreadPersistent& operator=(SentinelPointer s) {
- Assign(s);
+ PersistentRegionLock guard;
+ AssignSafe(guard, s);
return *this;
}
@@ -209,24 +277,8 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
* Clears the stored object.
*/
void Clear() {
- // Simplified version of `Assign()` to allow calling without a complete type
- // `T`.
- const void* old_value = GetValue();
- if (IsValid(old_value)) {
- PersistentRegionLock guard;
- old_value = GetValue();
- // The fast path check (IsValid()) does not acquire the lock. Reload
- // the value to ensure the reference has not been cleared.
- if (IsValid(old_value)) {
- CrossThreadPersistentRegion& region =
- this->GetPersistentRegion(old_value);
- region.FreeNode(GetNode());
- SetNode(nullptr);
- } else {
- CPPGC_DCHECK(!GetNode());
- }
- }
- SetValue(nullptr);
+ PersistentRegionLock guard;
+ AssignSafe(guard, nullptr);
}
/**
@@ -302,7 +354,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
v->TraceRoot(*handle, handle->Location());
}
- void Assign(T* ptr) {
+ void AssignUnsafe(T* ptr) {
const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegionLock guard;
@@ -330,7 +382,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
this->CheckPointer(ptr);
}
- void AssignUnsafe(T* ptr) {
+ void AssignSafe(PersistentRegionLock&, T* ptr) {
PersistentRegionLock::AssertLocked();
const void* old_value = GetValue();
if (IsValid(old_value)) {
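The destructor and Clear() changes above use double-checked locking: a cheap atomic check avoids the lock in the common empty/sentinel case, and validity is rechecked once the lock is held. A generic sketch of that shape, with illustrative names that are not cppgc API:

#include <atomic>
#include <mutex>

struct Slot {
  std::atomic<void*> node{nullptr};  // written with release, read with acquire
};

std::mutex g_region_mutex;  // stands in for PersistentRegionLock

void ReleaseSlot(Slot& slot) {
  // Fast path: skip the lock entirely when the slot is already empty.
  if (!slot.node.load(std::memory_order_acquire)) return;
  std::lock_guard<std::mutex> guard(g_region_mutex);
  // Recheck under the lock; another thread may have cleared the slot between
  // the fast-path check and acquiring the lock.
  if (slot.node.load(std::memory_order_relaxed)) {
    // ... return the node to its owning region here ...
    slot.node.store(nullptr, std::memory_order_relaxed);
  }
}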
diff --git a/chromium/v8/include/cppgc/internal/caged-heap-local-data.h b/chromium/v8/include/cppgc/internal/caged-heap-local-data.h
index 1fa60b69536..5b30d670292 100644
--- a/chromium/v8/include/cppgc/internal/caged-heap-local-data.h
+++ b/chromium/v8/include/cppgc/internal/caged-heap-local-data.h
@@ -53,10 +53,10 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
#endif // CPPGC_YOUNG_GENERATION
struct CagedHeapLocalData final {
- explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}
+ CagedHeapLocalData(HeapBase&, PageAllocator&);
bool is_incremental_marking_in_progress = false;
- HeapBase* heap_base = nullptr;
+ HeapBase& heap_base;
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;
#endif
diff --git a/chromium/v8/include/cppgc/internal/finalizer-trait.h b/chromium/v8/include/cppgc/internal/finalizer-trait.h
index a95126591cb..7bd6f83bf60 100644
--- a/chromium/v8/include/cppgc/internal/finalizer-trait.h
+++ b/chromium/v8/include/cppgc/internal/finalizer-trait.h
@@ -76,6 +76,8 @@ struct FinalizerTrait {
}
public:
+ static constexpr bool HasFinalizer() { return kNonTrivialFinalizer; }
+
// The callback used to finalize an object of type T.
static constexpr FinalizationCallback kCallback =
kNonTrivialFinalizer ? Finalize : nullptr;
diff --git a/chromium/v8/include/cppgc/internal/gc-info.h b/chromium/v8/include/cppgc/internal/gc-info.h
index 0830b194909..82a0d053431 100644
--- a/chromium/v8/include/cppgc/internal/gc-info.h
+++ b/chromium/v8/include/cppgc/internal/gc-info.h
@@ -19,11 +19,94 @@ namespace internal {
using GCInfoIndex = uint16_t;
-// Acquires a new GC info object and returns the index. In addition, also
-// updates `registered_index` atomically.
-V8_EXPORT GCInfoIndex
-EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
- FinalizationCallback, TraceCallback, NameCallback, bool);
+struct V8_EXPORT EnsureGCInfoIndexTrait final {
+ // Acquires a new GC info object and returns the index. In addition, also
+ // updates `registered_index` atomically.
+ template <typename T>
+ V8_INLINE static GCInfoIndex EnsureIndex(
+ std::atomic<GCInfoIndex>& registered_index) {
+ return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
+ }
+
+ private:
+ template <typename T, bool = std::is_polymorphic<T>::value,
+ bool = FinalizerTrait<T>::HasFinalizer(),
+ bool = NameTrait<T>::HasNonHiddenName()>
+ struct EnsureGCInfoIndexTraitDispatch;
+
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback, NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+};
+
+#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
+ template <typename T> \
+ struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
+ T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
+ V8_INLINE GCInfoIndex \
+ operator()(std::atomic<GCInfoIndex>& registered_index) { \
+ return function; \
+ } \
+ };
+
+// --------------------------------------------------------------------- //
+// DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function)
+// --------------------------------------------------------------------- //
+DISPATCH(true, true, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, true, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(true, false, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, false, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+DISPATCH(false, true, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, true, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(false, false, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, false, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+
+#undef DISPATCH
// Fold types based on finalizer behavior. Note that finalizer characteristics
// align with trace behavior, i.e., destructors are virtual when trace methods
@@ -57,16 +140,13 @@ struct GCInfoFolding {
// finalization, and naming.
template <typename T>
struct GCInfoTrait final {
- static GCInfoIndex Index() {
+ V8_INLINE static GCInfoIndex Index() {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
- : EnsureGCInfoIndex(
- registered_index, FinalizerTrait<T>::kCallback,
- TraceTrait<T>::Trace, NameTrait<T>::GetName,
- std::is_polymorphic<T>::value);
+ : EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
}
};
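EnsureGCInfoIndexTrait above picks one of eight registration overloads at compile time by specializing a helper on boolean traits. A reduced sketch of that dispatch idiom, with illustrative names rather than the cppgc implementation:

#include <type_traits>

template <typename T, bool IsPolymorphic = std::is_polymorphic<T>::value>
struct RegisterType;

// Specialization selected for polymorphic types.
template <typename T>
struct RegisterType<T, true> {
  static int Index() { return 1; }
};

// Specialization selected for non-polymorphic types.
template <typename T>
struct RegisterType<T, false> {
  static int Index() { return 2; }
};

struct Plain {};
struct WithVTable {
  virtual ~WithVTable() = default;
};

int PlainIndex() { return RegisterType<Plain>::Index(); }         // <T, false>
int VirtualIndex() { return RegisterType<WithVTable>::Index(); }  // <T, true>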
diff --git a/chromium/v8/include/cppgc/internal/name-trait.h b/chromium/v8/include/cppgc/internal/name-trait.h
index 2e2da1eab4a..32a33478592 100644
--- a/chromium/v8/include/cppgc/internal/name-trait.h
+++ b/chromium/v8/include/cppgc/internal/name-trait.h
@@ -6,6 +6,7 @@
#define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
#include <cstddef>
+#include <type_traits>
#include "cppgc/name-provider.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -67,6 +68,16 @@ class V8_EXPORT NameTraitBase {
template <typename T>
class NameTrait final : public NameTraitBase {
public:
+ static constexpr bool HasNonHiddenName() {
+#if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
+ return true;
+#elif CPPGC_SUPPORTS_OBJECT_NAMES
+ return true;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ return std::is_base_of<NameProvider, T>::value;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+ }
+
static HeapObjectName GetName(const void* obj) {
return GetNameFor(static_cast<const T*>(obj));
}
diff --git a/chromium/v8/include/cppgc/internal/persistent-node.h b/chromium/v8/include/cppgc/internal/persistent-node.h
index b5dba476a47..1fea667848b 100644
--- a/chromium/v8/include/cppgc/internal/persistent-node.h
+++ b/chromium/v8/include/cppgc/internal/persistent-node.h
@@ -75,16 +75,16 @@ class PersistentNode final {
TraceCallback trace_ = nullptr;
};
-class V8_EXPORT PersistentRegion {
+class V8_EXPORT PersistentRegionBase {
using PersistentNodeSlots = std::array<PersistentNode, 256u>;
public:
- PersistentRegion() = default;
+ PersistentRegionBase() = default;
// Clears Persistent fields to avoid stale pointers after heap teardown.
- ~PersistentRegion();
+ ~PersistentRegionBase();
- PersistentRegion(const PersistentRegion&) = delete;
- PersistentRegion& operator=(const PersistentRegion&) = delete;
+ PersistentRegionBase(const PersistentRegionBase&) = delete;
+ PersistentRegionBase& operator=(const PersistentRegionBase&) = delete;
PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
if (!free_list_head_) {
@@ -126,8 +126,39 @@ class V8_EXPORT PersistentRegion {
friend class CrossThreadPersistentRegion;
};
-// CrossThreadPersistent uses PersistentRegion but protects it using this lock
-// when needed.
+// Variant of PersistentRegionBase that checks whether allocation and freeing
+// happen only on the thread that created the region.
+class V8_EXPORT PersistentRegion final : public PersistentRegionBase {
+ public:
+ PersistentRegion();
+ // Clears Persistent fields to avoid stale pointers after heap teardown.
+ ~PersistentRegion() = default;
+
+ PersistentRegion(const PersistentRegion&) = delete;
+ PersistentRegion& operator=(const PersistentRegion&) = delete;
+
+ V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
+#if V8_ENABLE_CHECKS
+ CheckIsCreationThread();
+#endif // V8_ENABLE_CHECKS
+ return PersistentRegionBase::AllocateNode(owner, trace);
+ }
+
+ V8_INLINE void FreeNode(PersistentNode* node) {
+#if V8_ENABLE_CHECKS
+ CheckIsCreationThread();
+#endif // V8_ENABLE_CHECKS
+ PersistentRegionBase::FreeNode(node);
+ }
+
+ private:
+ void CheckIsCreationThread();
+
+ int creation_thread_id_;
+};
+
+// CrossThreadPersistent uses PersistentRegionBase but protects it using this
+// lock when needed.
class V8_EXPORT PersistentRegionLock final {
public:
PersistentRegionLock();
@@ -136,9 +167,10 @@ class V8_EXPORT PersistentRegionLock final {
static void AssertLocked();
};
-// Variant of PersistentRegion that checks whether the PersistentRegionLock is
-// locked.
-class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion {
+// Variant of PersistentRegionBase that checks whether the PersistentRegionLock
+// is locked.
+class V8_EXPORT CrossThreadPersistentRegion final
+ : protected PersistentRegionBase {
public:
CrossThreadPersistentRegion() = default;
// Clears Persistent fields to avoid stale pointers after heap teardown.
@@ -150,12 +182,12 @@ class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion {
V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
PersistentRegionLock::AssertLocked();
- return PersistentRegion::AllocateNode(owner, trace);
+ return PersistentRegionBase::AllocateNode(owner, trace);
}
V8_INLINE void FreeNode(PersistentNode* node) {
PersistentRegionLock::AssertLocked();
- PersistentRegion::FreeNode(node);
+ PersistentRegionBase::FreeNode(node);
}
void Trace(Visitor*);
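The new PersistentRegion above records its creation thread and, in checked builds, verifies that allocation and freeing stay on that thread. A minimal sketch of that affinity check; the cppgc implementation stores an int id rather than std::thread::id, so the names here are illustrative:

#include <cstdlib>
#include <thread>

class ThreadAffineRegion {
 public:
  ThreadAffineRegion() : creation_thread_(std::this_thread::get_id()) {}

  // Called from the allocation and free paths in checked builds.
  void CheckIsCreationThread() const {
    if (std::this_thread::get_id() != creation_thread_) std::abort();
  }

 private:
  const std::thread::id creation_thread_;
};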
diff --git a/chromium/v8/include/cppgc/internal/pointer-policies.h b/chromium/v8/include/cppgc/internal/pointer-policies.h
index cdf0bb693d6..7c4f4a0862a 100644
--- a/chromium/v8/include/cppgc/internal/pointer-policies.h
+++ b/chromium/v8/include/cppgc/internal/pointer-policies.h
@@ -51,7 +51,17 @@ struct NoWriteBarrierPolicy {
static void AssigningBarrier(const void*, const void*) {}
};
-class V8_EXPORT EnabledCheckingPolicy {
+class V8_EXPORT SameThreadEnabledCheckingPolicyBase {
+ protected:
+ void CheckPointerImpl(const void* ptr, bool points_to_payload,
+ bool check_off_heap_assignments);
+
+ const HeapBase* heap_ = nullptr;
+};
+
+template <bool kCheckOffHeapAssignments>
+class V8_EXPORT SameThreadEnabledCheckingPolicy
+ : private SameThreadEnabledCheckingPolicyBase {
protected:
template <typename T>
void CheckPointer(const T* ptr) {
@@ -61,23 +71,20 @@ class V8_EXPORT EnabledCheckingPolicy {
}
private:
- void CheckPointerImpl(const void* ptr, bool points_to_payload);
-
template <typename T, bool = IsCompleteV<T>>
struct CheckPointersImplTrampoline {
- static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
- policy->CheckPointerImpl(ptr, false);
+ static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, false, kCheckOffHeapAssignments);
}
};
template <typename T>
struct CheckPointersImplTrampoline<T, true> {
- static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
- policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>);
+ static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>,
+ kCheckOffHeapAssignments);
}
};
-
- const HeapBase* heap_ = nullptr;
};
class DisabledCheckingPolicy {
@@ -86,8 +93,12 @@ class DisabledCheckingPolicy {
};
#if V8_ENABLE_CHECKS
-using DefaultMemberCheckingPolicy = EnabledCheckingPolicy;
-using DefaultPersistentCheckingPolicy = EnabledCheckingPolicy;
+// Off-heap members are not connected to the object graph and thus cannot
+// resurrect dead objects.
+using DefaultMemberCheckingPolicy =
+ SameThreadEnabledCheckingPolicy<false /* kCheckOffHeapAssignments*/>;
+using DefaultPersistentCheckingPolicy =
+ SameThreadEnabledCheckingPolicy<true /* kCheckOffHeapAssignments*/>;
#else
using DefaultMemberCheckingPolicy = DisabledCheckingPolicy;
using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy;
diff --git a/chromium/v8/include/cppgc/internal/write-barrier.h b/chromium/v8/include/cppgc/internal/write-barrier.h
index 28184dc9c83..67f039c6584 100644
--- a/chromium/v8/include/cppgc/internal/write-barrier.h
+++ b/chromium/v8/include/cppgc/internal/write-barrier.h
@@ -214,6 +214,11 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback) {
+#if !defined(CPPGC_YOUNG_GENERATION)
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+#endif // !CPPGC_YOUNG_GENERATION
bool within_cage = TryGetCagedHeap(slot, value, params);
if (!within_cage) {
return WriteBarrier::Type::kNone;
@@ -317,7 +322,10 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
HeapHandleCallback callback) {
// The following check covers nullptr as well as sentinel pointer.
if (object <= static_cast<void*>(kSentinelPointer)) {
- return WriteBarrier::Type::kNone;
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
if (IsMarking(object, &params.heap)) {
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
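Both write-barrier hunks above add the same early exit: if no incremental or concurrent marking is active anywhere, the barrier reports kNone before doing any per-slot work. A standalone sketch of that fast path, with illustrative names:

#include <atomic>

enum class BarrierType { kNone, kMarking };

// Stands in for WriteBarrier::IsAnyIncrementalOrConcurrentMarking().
std::atomic<bool> g_any_marking{false};

inline BarrierType BarrierTypeFor(const void* value) {
  // Fast path: no marking in progress, so no barrier work is needed.
  if (!g_any_marking.load(std::memory_order_relaxed)) return BarrierType::kNone;
  // Slow path: consult per-heap state to decide whether `value` needs marking.
  return value ? BarrierType::kMarking : BarrierType::kNone;
}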
diff --git a/chromium/v8/include/cppgc/persistent.h b/chromium/v8/include/cppgc/persistent.h
index b83a464576e..182fb08549a 100644
--- a/chromium/v8/include/cppgc/persistent.h
+++ b/chromium/v8/include/cppgc/persistent.h
@@ -45,7 +45,7 @@ class PersistentBase {
mutable const void* raw_ = nullptr;
mutable PersistentNode* node_ = nullptr;
- friend class PersistentRegion;
+ friend class PersistentRegionBase;
};
// The basic class from which all Persistent classes are generated.
diff --git a/chromium/v8/include/cppgc/prefinalizer.h b/chromium/v8/include/cppgc/prefinalizer.h
index 29b18bef909..6153b37ff5d 100644
--- a/chromium/v8/include/cppgc/prefinalizer.h
+++ b/chromium/v8/include/cppgc/prefinalizer.h
@@ -38,7 +38,7 @@ class PrefinalizerRegistration final {
"Only garbage collected objects can have prefinalizers"); \
Class* self = static_cast<Class*>(object); \
if (liveness_broker.IsHeapObjectAlive(self)) return false; \
- self->Class::PreFinalizer(); \
+ self->PreFinalizer(); \
return true; \
} \
\
diff --git a/chromium/v8/include/js_protocol.pdl b/chromium/v8/include/js_protocol.pdl
index ebf9eb7fe82..b34c8551ad6 100644
--- a/chromium/v8/include/js_protocol.pdl
+++ b/chromium/v8/include/js_protocol.pdl
@@ -845,24 +845,6 @@ domain Profiler
# Type profile entries for parameters and return values of the functions in the script.
array of TypeProfileEntry entries
- # Collected counter information.
- experimental type CounterInfo extends object
- properties
- # Counter name.
- string name
- # Counter value.
- integer value
-
- # Runtime call counter information.
- experimental type RuntimeCallCounterInfo extends object
- properties
- # Counter name.
- string name
- # Counter value.
- number value
- # Counter time in seconds.
- number time
-
command disable
command enable
@@ -927,30 +909,6 @@ domain Profiler
# Type profile for all scripts since startTypeProfile() was turned on.
array of ScriptTypeProfile result
- # Enable counters collection.
- experimental command enableCounters
-
- # Disable counters collection.
- experimental command disableCounters
-
- # Retrieve counters.
- experimental command getCounters
- returns
- # Collected counters information.
- array of CounterInfo result
-
- # Enable run time call stats collection.
- experimental command enableRuntimeCallStats
-
- # Disable run time call stats collection.
- experimental command disableRuntimeCallStats
-
- # Retrieve run time call stats.
- experimental command getRuntimeCallStats
- returns
- # Collected runtime call counter information.
- array of RuntimeCallCounterInfo result
-
event consoleProfileFinished
parameters
string id
@@ -1469,6 +1427,8 @@ domain Runtime
experimental optional boolean accessorPropertiesOnly
# Whether preview should be generated for the results.
experimental optional boolean generatePreview
+ # If true, returns non-indexed properties only.
+ experimental optional boolean nonIndexedPropertiesOnly
returns
# Object properties.
array of PropertyDescriptor result
diff --git a/chromium/v8/include/v8-array-buffer.h b/chromium/v8/include/v8-array-buffer.h
new file mode 100644
index 00000000000..0ce2b653684
--- /dev/null
+++ b/chromium/v8/include/v8-array-buffer.h
@@ -0,0 +1,433 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ARRAY_BUFFER_H_
+#define INCLUDE_V8_ARRAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class SharedArrayBuffer;
+
+#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
+#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
+#endif
+
+enum class ArrayBufferCreationMode { kInternalized, kExternalized };
+
+/**
+ * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
+ * See a document linked in http://crbug.com/v8/9908 for more information.
+ *
+ * The allocation and destruction of backing stores are generally managed by
+ * V8. Clients should always use standard C++ memory ownership types (i.e.
+ * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
+ * properly, since V8 internal objects may alias backing stores.
+ *
+ * This object does not keep the underlying |ArrayBuffer::Allocator| alive by
+ * default. Use Isolate::CreateParams::array_buffer_allocator_shared when
+ * creating the Isolate to make it hold a reference to the allocator itself.
+ */
+class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
+ public:
+ ~BackingStore();
+
+ /**
+ * Return a pointer to the beginning of the memory block for this backing
+ * store. The pointer is only valid as long as this backing store object
+ * lives.
+ */
+ void* Data() const;
+
+ /**
+ * The length (in bytes) of this backing store.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Indicates whether the backing store was created for an ArrayBuffer or
+ * a SharedArrayBuffer.
+ */
+ bool IsShared() const;
+
+ /**
+ * Prevent implicit instantiation of operator delete with size_t argument.
+ * The size_t argument would be incorrect because ptr points to the
+ * internal BackingStore object.
+ */
+ void operator delete(void* ptr) { ::operator delete(ptr); }
+
+ /**
+ * Wrapper around ArrayBuffer::Allocator::Reallocate that preserves IsShared.
+ * Assumes that the backing_store was allocated by the ArrayBuffer allocator
+ * of the given isolate.
+ */
+ static std::unique_ptr<BackingStore> Reallocate(
+ v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
+ size_t byte_length);
+
+ /**
+ * This callback is used only if the memory block for a BackingStore cannot be
+ * allocated with an ArrayBuffer::Allocator. In such cases the destructor of
+ * the BackingStore invokes the callback to free the memory block.
+ */
+ using DeleterCallback = void (*)(void* data, size_t length,
+ void* deleter_data);
+
+ /**
+ * If the memory block of a BackingStore is static or is managed manually,
+ * then this empty deleter along with nullptr deleter_data can be passed to
+ * ArrayBuffer::NewBackingStore to indicate that.
+ *
+ * The manually managed case should be used with caution and only when it
+ * is guaranteed that the memory block freeing happens after detaching its
+ * ArrayBuffer.
+ */
+ static void EmptyDeleter(void* data, size_t length, void* deleter_data);
+
+ private:
+ /**
+ * See [Shared]ArrayBuffer::GetBackingStore and
+ * [Shared]ArrayBuffer::NewBackingStore.
+ */
+ BackingStore();
+};
+
+#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+// Use v8::BackingStore::DeleterCallback instead.
+using BackingStoreDeleterCallback = void (*)(void* data, size_t length,
+ void* deleter_data);
+
+#endif
+
+/**
+ * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
+ */
+class V8_EXPORT ArrayBuffer : public Object {
+ public:
+ /**
+ * A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
+ * The allocator is a global V8 setting. It has to be set via
+ * Isolate::CreateParams.
+ *
+ * Memory allocated through this allocator by V8 is accounted for as external
+ * memory by V8. Note that V8 keeps track of the memory for all internalized
+ * |ArrayBuffer|s. Responsibility for tracking external memory (using
+ * Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
+ * embedder upon externalization and taken over upon internalization (creating
+ * an internalized buffer from an existing buffer).
+ *
+ * Note that it is unsafe to call back into V8 from any of the allocator
+ * functions.
+ */
+ class V8_EXPORT Allocator {
+ public:
+ virtual ~Allocator() = default;
+
+ /**
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
+ * Memory should be initialized to zeroes.
+ */
+ virtual void* Allocate(size_t length) = 0;
+
+ /**
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
+ * Memory does not have to be initialized.
+ */
+ virtual void* AllocateUninitialized(size_t length) = 0;
+
+ /**
+ * Free the memory block of size |length|, pointed to by |data|.
+ * That memory is guaranteed to be previously allocated by |Allocate|.
+ */
+ virtual void Free(void* data, size_t length) = 0;
+
+ /**
+ * Reallocate the memory block of size |old_length| to a memory block of
+ * size |new_length| by expanding, contracting, or copying the existing
+ * memory block. If |new_length| > |old_length|, then the new part of
+ * the memory must be initialized to zeros. Return nullptr if reallocation
+ * is not successful.
+ *
+ * The caller guarantees that the memory block was previously allocated
+ * using Allocate or AllocateUninitialized.
+ *
+ * The default implementation allocates a new block and copies data.
+ */
+ virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
+
+ /**
+ * ArrayBuffer allocation mode. kNormal is a malloc/free style allocation,
+ * while kReservation is for larger allocations with the ability to set
+ * access permissions.
+ */
+ enum class AllocationMode { kNormal, kReservation };
+
+ /**
+ * Convenience allocator.
+ *
+ * When the virtual memory cage is enabled, this allocator will allocate its
+ * backing memory inside the cage. Otherwise, it will rely on malloc/free.
+ *
+ * Caller takes ownership, i.e. the returned object needs to be freed using
+ * |delete allocator| once it is no longer in use.
+ */
+ static Allocator* NewDefaultAllocator();
+ };
+
+ /**
+ * Data length in bytes.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Create a new ArrayBuffer. Allocate |byte_length| bytes.
+ * Allocated memory will be owned by a created ArrayBuffer and
+ * will be deallocated when it is garbage-collected,
+ * unless the object is externalized.
+ */
+ static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
+
+ /**
+ * Create a new ArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then the bit is set
+ * as follows. If the backing store does not own the underlying buffer, then
+ * the array is created in the externalized state. Otherwise, the array is
+ * created in the internalized state. In the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<ArrayBuffer> New(Isolate* isolate,
+ std::shared_ptr<BackingStore> backing_store);
+
+ /**
+ * Returns a new standalone BackingStore that is allocated using the array
+ * buffer allocator of the isolate. The result can be later passed to
+ * ArrayBuffer::New.
+ *
+ * If the allocator returns nullptr, then the function may cause GCs in the
+ * given isolate and re-try the allocation. If GCs do not help, then the
+ * function will crash with an out-of-memory error.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
+ size_t byte_length);
+ /**
+ * Returns a new standalone BackingStore that takes over the ownership of
+ * the given buffer. The destructor of the BackingStore invokes the given
+ * deleter callback.
+ *
+ * The result can be later passed to ArrayBuffer::New. The raw pointer
+ * to the buffer must not be passed again to any V8 API function.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
+ void* deleter_data);
+
+ /**
+ * Returns true if this ArrayBuffer may be detached.
+ */
+ bool IsDetachable() const;
+
+ /**
+ * Detaches this ArrayBuffer and all its views (typed arrays).
+ * Detaching sets the byte length of the buffer and all typed arrays to zero,
+ * preventing JavaScript from ever accessing the underlying backing store.
+ * ArrayBuffer should have been externalized and must be detachable.
+ */
+ void Detach();
+
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage lifetime of the storage through other means.
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
+ V8_INLINE static ArrayBuffer* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<ArrayBuffer*>(value);
+ }
+
+ static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+ static const int kEmbedderFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+
+ private:
+ ArrayBuffer();
+ static void CheckCast(Value* obj);
+};
+
+#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
+#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
+#endif
+
+/**
+ * A base class for an instance of one of "views" over ArrayBuffer,
+ * including TypedArrays and DataView (ES6 draft 15.13).
+ */
+class V8_EXPORT ArrayBufferView : public Object {
+ public:
+ /**
+ * Returns underlying ArrayBuffer.
+ */
+ Local<ArrayBuffer> Buffer();
+ /**
+ * Byte offset in |Buffer|.
+ */
+ size_t ByteOffset();
+ /**
+ * Size of a view in bytes.
+ */
+ size_t ByteLength();
+
+ /**
+ * Copy the contents of the ArrayBufferView's buffer to embedder-defined
+ * memory without the additional overhead that calling ArrayBufferView::Buffer
+ * might incur.
+ *
+ * Will write at most min(|byte_length|, ByteLength) bytes starting at
+ * ByteOffset of the underlying buffer to the memory starting at |dest|.
+ * Returns the number of bytes actually written.
+ */
+ size_t CopyContents(void* dest, size_t byte_length);
+
+ /**
+ * Returns true if ArrayBufferView's backing ArrayBuffer has already been
+ * allocated.
+ */
+ bool HasBuffer() const;
+
+ V8_INLINE static ArrayBufferView* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<ArrayBufferView*>(value);
+ }
+
+ static const int kInternalFieldCount =
+ V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+ static const int kEmbedderFieldCount =
+ V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+
+ private:
+ ArrayBufferView();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of DataView constructor (ES6 draft 15.13.7).
+ */
+class V8_EXPORT DataView : public ArrayBufferView {
+ public:
+ static Local<DataView> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<DataView> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static DataView* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<DataView*>(value);
+ }
+
+ private:
+ DataView();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the built-in SharedArrayBuffer constructor.
+ */
+class V8_EXPORT SharedArrayBuffer : public Object {
+ public:
+ /**
+ * Data length in bytes.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
+ * Allocated memory will be owned by a created SharedArrayBuffer and
+ * will be deallocated when it is garbage-collected,
+ * unless the object is externalized.
+ */
+ static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
+
+ /**
+ * Create a new SharedArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then the bit is set
+ * as follows. If the backing store does not own the underlying buffer, then
+ * the array is created in the externalized state. Otherwise, the array is
+ * created in the internalized state. In the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
+
+ /**
+ * Returns a new standalone BackingStore that is allocated using the array
+ * buffer allocator of the isolate. The result can be later passed to
+ * SharedArrayBuffer::New.
+ *
+ * If the allocator returns nullptr, then the function may cause GCs in the
+ * given isolate and re-try the allocation. If GCs do not help, then the
+ * function will crash with an out-of-memory error.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
+ size_t byte_length);
+ /**
+ * Returns a new standalone BackingStore that takes over the ownership of
+ * the given buffer. The destructor of the BackingStore invokes the given
+ * deleter callback.
+ *
+ * The result can be later passed to SharedArrayBuffer::New. The raw pointer
+ * to the buffer must not be passed again to any V8 functions.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
+ void* deleter_data);
+
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage lifetime of the storage through other means.
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
+ V8_INLINE static SharedArrayBuffer* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<SharedArrayBuffer*>(value);
+ }
+
+ static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+
+ private:
+ SharedArrayBuffer();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ARRAY_BUFFER_H_
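As a usage note for the BackingStore API declared above, a sketch of wrapping externally allocated memory in an ArrayBuffer. It assumes a live Isolate with an active HandleScope and entered Context; error handling is elided:

#include <cstdlib>
#include <memory>

#include "v8-array-buffer.h"
#include "v8-isolate.h"
#include "v8-local-handle.h"

v8::Local<v8::ArrayBuffer> WrapMallocedBuffer(v8::Isolate* isolate,
                                              size_t byte_length) {
  void* data = std::calloc(1, byte_length);
  // The deleter runs when the backing store is destroyed; the raw pointer must
  // not be passed to V8 again after this call.
  std::shared_ptr<v8::BackingStore> store = v8::ArrayBuffer::NewBackingStore(
      data, byte_length,
      [](void* buffer, size_t, void*) { std::free(buffer); },
      /*deleter_data=*/nullptr);
  return v8::ArrayBuffer::New(isolate, std::move(store));
}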
diff --git a/chromium/v8/include/v8-callbacks.h b/chromium/v8/include/v8-callbacks.h
new file mode 100644
index 00000000000..870df6a8211
--- /dev/null
+++ b/chromium/v8/include/v8-callbacks.h
@@ -0,0 +1,377 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ISOLATE_CALLBACKS_H_
+#define INCLUDE_V8_ISOLATE_CALLBACKS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "cppgc/common.h"
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if defined(V8_OS_WIN)
+struct _EXCEPTION_POINTERS;
+#endif
+
+namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+class Isolate;
+class Message;
+class Module;
+class Object;
+class Promise;
+class ScriptOrModule;
+class String;
+class UnboundScript;
+class Value;
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+ enum EventType {
+ CODE_ADDED,
+ CODE_MOVED,
+ CODE_REMOVED,
+ CODE_ADD_LINE_POS_INFO,
+ CODE_START_LINE_INFO_RECORDING,
+ CODE_END_LINE_INFO_RECORDING
+ };
+ // Definition of the code position type. The "POSITION" type means a place
+ // in the source code that is of interest when making stack traces to
+ // pin-point the source location of a stack frame as close as possible.
+ // The "STATEMENT_POSITION" means the place at the beginning of each
+ // statement, and is used to indicate possible break locations.
+ enum PositionType { POSITION, STATEMENT_POSITION };
+
+ // There are three different kinds of CodeType, one for JIT code generated
+ // by the optimizing compiler, one for byte code generated for the
+ // interpreter, and one for code generated from Wasm. For JIT_CODE and
+ // WASM_CODE, |code_start| points to the beginning of jitted assembly code,
+ // while for BYTE_CODE events, |code_start| points to the first bytecode of
+ // the interpreted function.
+ enum CodeType { BYTE_CODE, JIT_CODE, WASM_CODE };
+
+ // Type of event.
+ EventType type;
+ CodeType code_type;
+ // Start of the instructions.
+ void* code_start;
+ // Size of the instructions.
+ size_t code_len;
+ // Script info for CODE_ADDED event.
+ Local<UnboundScript> script;
+ // User-defined data for *_LINE_INFO_* events. It's used to hold the source
+ // code line information returned from the CODE_START_LINE_INFO_RECORDING
+ // event, and is passed to subsequent CODE_ADD_LINE_POS_INFO and
+ // CODE_END_LINE_INFO_RECORDING events.
+ void* user_data;
+
+ struct name_t {
+ // Name of the object associated with the code. Note that the string is not
+ // zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ };
+
+ struct line_info_t {
+ // PC offset
+ size_t offset;
+ // Code position
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ };
+
+ struct wasm_source_info_t {
+ // Source file name.
+ const char* filename;
+ // Length of filename.
+ size_t filename_size;
+ // Line number table, which maps offsets of JITted code to line numbers of
+ // source file.
+ const line_info_t* line_number_table;
+ // Number of entries in the line number table.
+ size_t line_number_table_size;
+ };
+
+ wasm_source_info_t* wasm_source_info;
+
+ union {
+ // Only valid for CODE_ADDED.
+ struct name_t name;
+
+ // Only valid for CODE_ADD_LINE_POS_INFO
+ struct line_info_t line_info;
+
+ // New location of instructions. Only valid for CODE_MOVED.
+ void* new_code_start;
+ };
+
+ Isolate* isolate;
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+ kJitCodeEventDefault = 0,
+ // Generate callbacks for already existent code.
+ kJitCodeEventEnumExisting = 1
+};
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+using JitCodeEventHandler = void (*)(const JitCodeEvent* event);
+
+// --- Garbage Collection Callbacks ---
+
+/**
+ * Applications can register callback functions which will be called before and
+ * after certain garbage collection operations. Allocations are not allowed in
+ * the callback functions; you therefore cannot manipulate objects (set or
+ * delete properties, for example), since it is possible that such operations
+ * will result in the allocation of objects.
+ */
+enum GCType {
+ kGCTypeScavenge = 1 << 0,
+ kGCTypeMarkSweepCompact = 1 << 1,
+ kGCTypeIncrementalMarking = 1 << 2,
+ kGCTypeProcessWeakCallbacks = 1 << 3,
+ kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
+ kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
+};
+
+/**
+ * GCCallbackFlags is used to notify additional information about the GC
+ * callback.
+ * - kGCCallbackFlagConstructRetainedObjectInfos: The GC callback is for
+ * constructing retained object infos.
+ * - kGCCallbackFlagForced: The GC callback is for a forced GC for testing.
+ * - kGCCallbackFlagSynchronousPhantomCallbackProcessing: The GC callback
+ * is called synchronously without getting posted to an idle task.
+ * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
+ * in a phase where V8 is trying to collect all available garbage
+ * (e.g., handling a low memory notification).
+ * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to
+ * trigger an idle garbage collection.
+ */
+enum GCCallbackFlags {
+ kNoGCCallbackFlags = 0,
+ kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
+ kGCCallbackFlagForced = 1 << 2,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
+ kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
+ kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
+ kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
+};
+
+using GCCallback = void (*)(GCType type, GCCallbackFlags flags);
+
+using InterruptCallback = void (*)(Isolate* isolate, void* data);
+
+/**
+ * This callback is invoked when the heap size is close to the heap limit and
+ * V8 is likely to abort with out-of-memory error.
+ * The callback can extend the heap limit by returning a value that is greater
+ * than the current_heap_limit. The initial heap limit is the limit that was
+ * set after heap setup.
+ */
+using NearHeapLimitCallback = size_t (*)(void* data, size_t current_heap_limit,
+ size_t initial_heap_limit);
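+
+// Usage sketch (illustrative, not part of this header): a near-heap-limit
+// callback that grants a single 100 MB extension and otherwise keeps the
+// current limit; `data` is assumed to point at a hypothetical per-isolate
+// flag supplied at registration time.
+//
+//   size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
+//                          size_t initial_heap_limit) {
+//     bool* already_extended = static_cast<bool*>(data);
+//     if (*already_extended) return current_heap_limit;
+//     *already_extended = true;
+//     return current_heap_limit + 100 * 1024 * 1024;
+//   }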
+
+/**
+ * Callback function passed to SetUnhandledExceptionCallback.
+ */
+#if defined(V8_OS_WIN)
+using UnhandledExceptionCallback =
+ int (*)(_EXCEPTION_POINTERS* exception_pointers);
+#endif
+
+// --- Counters Callbacks ---
+
+using CounterLookupCallback = int* (*)(const char* name);
+
+using CreateHistogramCallback = void* (*)(const char* name, int min, int max,
+ size_t buckets);
+
+using AddHistogramSampleCallback = void (*)(void* histogram, int sample);
+
+// --- Exceptions ---
+
+using FatalErrorCallback = void (*)(const char* location, const char* message);
+
+using OOMErrorCallback = void (*)(const char* location, bool is_heap_oom);
+
+using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
+
+// --- Tracing ---
+
+enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
+using LogEventCallback = void (*)(const char* name,
+ int /* LogEventStatus */ status);
+
+// --- Crashkeys Callback ---
+enum class CrashKeyId {
+ kIsolateAddress,
+ kReadonlySpaceFirstPageAddress,
+ kMapSpaceFirstPageAddress,
+ kCodeSpaceFirstPageAddress,
+ kDumpType,
+};
+
+using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value);
+
+// --- Enter/Leave Script Callback ---
+using BeforeCallEnteredCallback = void (*)(Isolate*);
+using CallCompletedCallback = void (*)(Isolate*);
+
+// --- AllowCodeGenerationFromStrings callbacks ---
+
+/**
+ * Callback to check if code generation from strings is allowed. See
+ * Context::AllowCodeGenerationFromStrings.
+ */
+using AllowCodeGenerationFromStringsCallback = bool (*)(Local<Context> context,
+ Local<String> source);
+
+struct ModifyCodeGenerationFromStringsResult {
+ // If true, proceed with the codegen algorithm. Otherwise, block it.
+ bool codegen_allowed = false;
+ // Overwrite the original source with this string, if present.
+ // Use the original source if empty.
+ // This field is considered only if codegen_allowed is true.
+ MaybeLocal<String> modified_source;
+};
+
+/**
+ * Access type specification.
+ */
+enum AccessType {
+ ACCESS_GET,
+ ACCESS_SET,
+ ACCESS_HAS,
+ ACCESS_DELETE,
+ ACCESS_KEYS
+};
+
+// --- Failed Access Check Callback ---
+
+using FailedAccessCheckCallback = void (*)(Local<Object> target,
+ AccessType type, Local<Value> data);
+
+/**
+ * Callback to check if codegen is allowed from a source object, and convert
+ * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
+ */
+using ModifyCodeGenerationFromStringsCallback =
+ ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
+ Local<Value> source);
+using ModifyCodeGenerationFromStringsCallback2 =
+ ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
+ Local<Value> source,
+ bool is_code_like);
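+
+// Usage sketch (illustrative, not part of this header): a callback matching
+// ModifyCodeGenerationFromStringsCallback2 that allows code generation only
+// for plain strings and code-like objects, leaving the source unmodified.
+//
+//   ModifyCodeGenerationFromStringsResult AllowOnlyCodeLike(
+//       Local<Context> context, Local<Value> source, bool is_code_like) {
+//     ModifyCodeGenerationFromStringsResult result;
+//     result.codegen_allowed = is_code_like || source->IsString();
+//     // modified_source stays empty, so the original source is used.
+//     return result;
+//   }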
+
+// --- WebAssembly compilation callbacks ---
+using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&);
+
+using AllowWasmCodeGenerationCallback = bool (*)(Local<Context> context,
+ Local<String> source);
+
+// --- Callback for APIs defined on v8-supported objects, but implemented
+// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
+using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&);
+
+// --- Callback for WebAssembly.compileStreaming ---
+using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&);
+
+// --- Callback for loading source map file for Wasm profiling support
+using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
+ const char* name);
+
+// --- Callback for checking if WebAssembly Simd is enabled ---
+using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
+
+// --- Callback for checking if WebAssembly exceptions are enabled ---
+using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+
+// --- Callback for checking if WebAssembly dynamic tiering is enabled ---
+using WasmDynamicTieringEnabledCallback = bool (*)(Local<Context> context);
+
+// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
+using SharedArrayBufferConstructorEnabledCallback =
+ bool (*)(Local<Context> context);
+
+/**
+ * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
+ * require the embedder to load a module. This is used as part of the dynamic
+ * import syntax.
+ *
+ * The referrer contains metadata about the script/module that calls
+ * import.
+ *
+ * The specifier is the name of the module that should be imported.
+ *
+ * The import_assertions are import assertions for this request in the form:
+ * [key1, value1, key2, value2, ...] where the keys and values are of type
+ * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
+ * returned from ModuleRequest::GetImportAssertions(), this array does not
+ * contain the source Locations of the assertions.
+ *
+ * The embedder must compile, instantiate, evaluate the Module, and
+ * obtain its namespace object.
+ *
+ * The Promise returned from this function is forwarded to userland
+ * JavaScript. The embedder must resolve this promise with the module
+ * namespace object. In case of an exception, the embedder must reject
+ * this promise with the exception. If the promise creation itself
+ * fails (e.g. due to stack overflow), the embedder must propagate
+ * that exception by returning an empty MaybeLocal.
+ */
+using HostImportModuleDynamicallyWithImportAssertionsCallback =
+ MaybeLocal<Promise> (*)(Local<Context> context,
+ Local<ScriptOrModule> referrer,
+ Local<String> specifier,
+ Local<FixedArray> import_assertions);
+
+/**
+ * HostInitializeImportMetaObjectCallback is called the first time import.meta
+ * is accessed for a module. Subsequent access will reuse the same value.
+ *
+ * The method combines two implementation-defined abstract operations into one:
+ * HostGetImportMetaProperties and HostFinalizeImportMeta.
+ *
+ * The embedder should use v8::Object::CreateDataProperty to add properties on
+ * the meta object.
+ */
+using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta);
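+
+// Usage sketch (illustrative, not part of this header): an import.meta
+// initializer that installs a single "url" property via CreateDataProperty,
+// as recommended above; `LookupModuleUrl` is a hypothetical embedder lookup.
+//
+//   void InitImportMeta(Local<Context> context, Local<Module> module,
+//                       Local<Object> meta) {
+//     Isolate* isolate = context->GetIsolate();
+//     Local<String> url = LookupModuleUrl(isolate, module);
+//     meta->CreateDataProperty(context,
+//                              String::NewFromUtf8Literal(isolate, "url"),
+//                              url)
+//         .Check();
+//   }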
+
+/**
+ * PrepareStackTraceCallback is called when the stack property of an error is
+ * first accessed. The return value will be used as the stack value. If this
+ * callback is registered, the |Error.prepareStackTrace| API will be disabled.
+ * |sites| is an array of call sites, specified in
+ * https://v8.dev/docs/stack-trace-api
+ */
+using PrepareStackTraceCallback = MaybeLocal<Value> (*)(Local<Context> context,
+ Local<Value> error,
+ Local<Array> sites);
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ISOLATE_CALLBACKS_H_
diff --git a/chromium/v8/include/v8-container.h b/chromium/v8/include/v8-container.h
new file mode 100644
index 00000000000..ce068603649
--- /dev/null
+++ b/chromium/v8/include/v8-container.h
@@ -0,0 +1,129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_CONTAINER_H_
+#define INCLUDE_V8_CONTAINER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+
+/**
+ * An instance of the built-in array constructor (ECMA-262, 15.4.2).
+ */
+class V8_EXPORT Array : public Object {
+ public:
+ uint32_t Length() const;
+
+ /**
+ * Creates a JavaScript array with the given length. If the length
+   * is negative, the returned array will have length 0.
+ */
+ static Local<Array> New(Isolate* isolate, int length = 0);
+
+ /**
+ * Creates a JavaScript array out of a Local<Value> array in C++
+ * with a known length.
+ */
+ static Local<Array> New(Isolate* isolate, Local<Value>* elements,
+ size_t length);
+ V8_INLINE static Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Array*>(value);
+ }
+
+ private:
+ Array();
+ static void CheckCast(Value* obj);
+};
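+
+// Usage sketch (illustrative, not part of this header): building a
+// two-element array from C++ handles, assuming an active HandleScope and an
+// entered context.
+//
+//   Local<Value> elements[] = {Number::New(isolate, 1),
+//                              Number::New(isolate, 2)};
+//   Local<Array> array = Array::New(isolate, elements, 2);
+//   // array->Length() == 2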
+
+/**
+ * An instance of the built-in Map constructor (ECMA-262, 6th Edition, 23.1.1).
+ */
+class V8_EXPORT Map : public Object {
+ public:
+ size_t Size() const;
+ void Clear();
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Map> Set(Local<Context> context,
+ Local<Value> key,
+ Local<Value> value);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ /**
+ * Returns an array of length Size() * 2, where index N is the Nth key and
+ * index N + 1 is the Nth value.
+ */
+ Local<Array> AsArray() const;
+
+ /**
+ * Creates a new empty Map.
+ */
+ static Local<Map> New(Isolate* isolate);
+
+ V8_INLINE static Map* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Map*>(value);
+ }
+
+ private:
+ Map();
+ static void CheckCast(Value* obj);
+};
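+
+// Usage sketch (illustrative, not part of this header): basic Map usage in an
+// entered context. ToLocalChecked() is used only to keep the sketch short;
+// real code should handle empty results.
+//
+//   Local<Map> map = Map::New(isolate);
+//   Local<String> key = String::NewFromUtf8Literal(isolate, "answer");
+//   map = map->Set(context, key, Number::New(isolate, 42)).ToLocalChecked();
+//   Local<Value> value = map->Get(context, key).ToLocalChecked();
+//   // map->Size() == 1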
+
+/**
+ * An instance of the built-in Set constructor (ECMA-262, 6th Edition, 23.2.1).
+ */
+class V8_EXPORT Set : public Object {
+ public:
+ size_t Size() const;
+ void Clear();
+ V8_WARN_UNUSED_RESULT MaybeLocal<Set> Add(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ /**
+ * Returns an array of the keys in this Set.
+ */
+ Local<Array> AsArray() const;
+
+ /**
+ * Creates a new empty Set.
+ */
+ static Local<Set> New(Isolate* isolate);
+
+ V8_INLINE static Set* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Set*>(value);
+ }
+
+ private:
+ Set();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_CONTAINER_H_
diff --git a/chromium/v8/include/v8-context.h b/chromium/v8/include/v8-context.h
new file mode 100644
index 00000000000..bd28c6c9c93
--- /dev/null
+++ b/chromium/v8/include/v8-context.h
@@ -0,0 +1,418 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_CONTEXT_H_
+#define INCLUDE_V8_CONTEXT_H_
+
+#include <stdint.h>
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-snapshot.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+class MicrotaskQueue;
+class Object;
+class ObjectTemplate;
+class Value;
+class String;
+
+/**
+ * A container for extension names.
+ */
+class V8_EXPORT ExtensionConfiguration {
+ public:
+ ExtensionConfiguration() : name_count_(0), names_(nullptr) {}
+ ExtensionConfiguration(int name_count, const char* names[])
+ : name_count_(name_count), names_(names) {}
+
+ const char** begin() const { return &names_[0]; }
+ const char** end() const { return &names_[name_count_]; }
+
+ private:
+ const int name_count_;
+ const char** names_;
+};
+
+/**
+ * A sandboxed execution context with its own set of built-in objects
+ * and functions.
+ */
+class V8_EXPORT Context : public Data {
+ public:
+ /**
+ * Returns the global proxy object.
+ *
+   * The global proxy object is a thin wrapper whose prototype points to the
+   * actual context's global object, which holds properties like Object, etc.
+   * This is done for security reasons (for more details see
+   * https://wiki.mozilla.org/Gecko:SplitWindow).
+   *
+   * Please note that changes to the prototype of the global proxy object will
+   * most likely break the VM; V8 expects only the global object as the
+   * prototype of the global proxy object.
+ */
+ Local<Object> Global();
+
+ /**
+ * Detaches the global object from its context before
+ * the global object can be reused to create a new context.
+ */
+ void DetachGlobal();
+
+ /**
+ * Creates a new context and returns a handle to the newly allocated
+ * context.
+ *
+ * \param isolate The isolate in which to create the context.
+ *
+ * \param extensions An optional extension configuration containing
+ * the extensions to be installed in the newly created context.
+ *
+ * \param global_template An optional object template from which the
+ * global object for the newly created context will be created.
+ *
+ * \param global_object An optional global object to be reused for
+ * the newly created context. This global object must have been
+ * created by a previous call to Context::New with the same global
+ * template. The state of the global object will be completely reset
+   * and only object identity will remain.
+ */
+ static Local<Context> New(
+ Isolate* isolate, ExtensionConfiguration* extensions = nullptr,
+ MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
+ MaybeLocal<Value> global_object = MaybeLocal<Value>(),
+ DeserializeInternalFieldsCallback internal_fields_deserializer =
+ DeserializeInternalFieldsCallback(),
+ MicrotaskQueue* microtask_queue = nullptr);
+
+ /**
+ * Create a new context from a (non-default) context snapshot. There
+ * is no way to provide a global object template since we do not create
+ * a new global object from template, but we can reuse a global object.
+ *
+ * \param isolate See v8::Context::New.
+ *
+ * \param context_snapshot_index The index of the context snapshot to
+ * deserialize from. Use v8::Context::New for the default snapshot.
+ *
+ * \param embedder_fields_deserializer Optional callback to deserialize
+ * internal fields. It should match the SerializeInternalFieldCallback used
+ * to serialize.
+ *
+ * \param extensions See v8::Context::New.
+ *
+ * \param global_object See v8::Context::New.
+ */
+ static MaybeLocal<Context> FromSnapshot(
+ Isolate* isolate, size_t context_snapshot_index,
+ DeserializeInternalFieldsCallback embedder_fields_deserializer =
+ DeserializeInternalFieldsCallback(),
+ ExtensionConfiguration* extensions = nullptr,
+ MaybeLocal<Value> global_object = MaybeLocal<Value>(),
+ MicrotaskQueue* microtask_queue = nullptr);
+
+ /**
+   * Returns a global object that isn't backed by an actual context.
+ *
+ * The global template needs to have access checks with handlers installed.
+ * If an existing global object is passed in, the global object is detached
+ * from its context.
+ *
+ * Note that this is different from a detached context where all accesses to
+ * the global proxy will fail. Instead, the access check handlers are invoked.
+ *
+ * It is also not possible to detach an object returned by this method.
+ * Instead, the access check handlers need to return nothing to achieve the
+ * same effect.
+ *
+ * It is possible, however, to create a new context from the global object
+ * returned by this method.
+ */
+ static MaybeLocal<Object> NewRemoteContext(
+ Isolate* isolate, Local<ObjectTemplate> global_template,
+ MaybeLocal<Value> global_object = MaybeLocal<Value>());
+
+ /**
+ * Sets the security token for the context. To access an object in
+ * another context, the security tokens must match.
+ */
+ void SetSecurityToken(Local<Value> token);
+
+ /** Restores the security token to the default value. */
+ void UseDefaultSecurityToken();
+
+ /** Returns the security token of this context.*/
+ Local<Value> GetSecurityToken();
+
+ /**
+ * Enter this context. After entering a context, all code compiled
+ * and run is compiled and run in this context. If another context
+ * is already entered, this old context is saved so it can be
+ * restored when the new context is exited.
+ */
+ void Enter();
+
+ /**
+ * Exit this context. Exiting the current context restores the
+ * context that was in place when entering the current context.
+ */
+ void Exit();
+
+ /** Returns the isolate associated with a current context. */
+ Isolate* GetIsolate();
+
+ /** Returns the microtask queue associated with a current context. */
+ MicrotaskQueue* GetMicrotaskQueue();
+
+ /**
+ * The field at kDebugIdIndex used to be reserved for the inspector.
+ * It now serves no purpose.
+ */
+ enum EmbedderDataFields { kDebugIdIndex = 0 };
+
+ /**
+ * Return the number of fields allocated for embedder data.
+ */
+ uint32_t GetNumberOfEmbedderDataFields();
+
+ /**
+ * Gets the embedder data with the given index, which must have been set by a
+ * previous call to SetEmbedderData with the same index.
+ */
+ V8_INLINE Local<Value> GetEmbedderData(int index);
+
+ /**
+ * Gets the binding object used by V8 extras. Extra natives get a reference
+ * to this object and can use it to "export" functionality by adding
+ * properties. Extra natives can also "import" functionality by accessing
+ * properties added by the embedder using the V8 API.
+ */
+ Local<Object> GetExtrasBindingObject();
+
+ /**
+ * Sets the embedder data with the given index, growing the data as
+ * needed. Note that index 0 currently has a special meaning for Chrome's
+ * debugger.
+ */
+ void SetEmbedderData(int index, Local<Value> value);
+
+ /**
+ * Gets a 2-byte-aligned native pointer from the embedder data with the given
+ * index, which must have been set by a previous call to
+ * SetAlignedPointerInEmbedderData with the same index. Note that index 0
+ * currently has a special meaning for Chrome's debugger.
+ */
+ V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
+
+ /**
+ * Sets a 2-byte-aligned native pointer in the embedder data with the given
+ * index, growing the data as needed. Note that index 0 currently has a
+ * special meaning for Chrome's debugger.
+ */
+ void SetAlignedPointerInEmbedderData(int index, void* value);
+
+ /**
+ * Control whether code generation from strings is allowed. Calling
+ * this method with false will disable 'eval' and the 'Function'
+ * constructor for code running in this context. If 'eval' or the
+ * 'Function' constructor are used an exception will be thrown.
+ *
+ * If code generation from strings is not allowed the
+ * V8::AllowCodeGenerationFromStrings callback will be invoked if
+ * set before blocking the call to 'eval' or the 'Function'
+ * constructor. If that callback returns true, the call will be
+ * allowed, otherwise an exception will be thrown. If no callback is
+   * allowed; otherwise an exception will be thrown. If no callback is
+   * set, an exception will be thrown.
+ void AllowCodeGenerationFromStrings(bool allow);
+
+ /**
+ * Returns true if code generation from strings is allowed for the context.
+ * For more details see AllowCodeGenerationFromStrings(bool) documentation.
+ */
+ bool IsCodeGenerationFromStringsAllowed() const;
+
+ /**
+ * Sets the error description for the exception that is thrown when
+ * code generation from strings is not allowed and 'eval' or the 'Function'
+ * constructor are called.
+ */
+ void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
+
+ /**
+ * Return data that was previously attached to the context snapshot via
+ * SnapshotCreator, and removes the reference to it.
+ * Repeated call with the same index returns an empty MaybeLocal.
+ */
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
+
+ /**
+ * If callback is set, abort any attempt to execute JavaScript in this
+ * context, call the specified callback, and throw an exception.
+ * To unset abort, pass nullptr as callback.
+ */
+ using AbortScriptExecutionCallback = void (*)(Isolate* isolate,
+ Local<Context> context);
+ void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
+
+ /**
+ * Returns the value that was set or restored by
+ * SetContinuationPreservedEmbedderData(), if any.
+ */
+ Local<Value> GetContinuationPreservedEmbedderData() const;
+
+ /**
+ * Sets a value that will be stored on continuations and reset while the
+ * continuation runs.
+ */
+ void SetContinuationPreservedEmbedderData(Local<Value> context);
+
+ /**
+ * Set or clear hooks to be invoked for promise lifecycle operations.
+ * To clear a hook, set it to an empty v8::Function. Each function will
+ * receive the observed promise as the first argument. If a chaining
+ * operation is used on a promise, the init will additionally receive
+ * the parent promise as the second argument.
+ */
+ void SetPromiseHooks(Local<Function> init_hook, Local<Function> before_hook,
+ Local<Function> after_hook,
+ Local<Function> resolve_hook);
+
+ /**
+ * Stack-allocated class which sets the execution context for all
+ * operations executed within a local scope.
+ */
+ class V8_NODISCARD Scope {
+ public:
+ explicit V8_INLINE Scope(Local<Context> context) : context_(context) {
+ context_->Enter();
+ }
+ V8_INLINE ~Scope() { context_->Exit(); }
+
+ private:
+ Local<Context> context_;
+ };
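+
+  // Usage sketch (illustrative, not part of this header): creating a context
+  // and entering it for the duration of a scope.
+  //
+  //   Local<Context> context = Context::New(isolate);
+  //   {
+  //     Context::Scope context_scope(context);
+  //     // Code compiled and run here executes in |context|.
+  //   }  // |context| is exited again here.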
+
+ /**
+ * Stack-allocated class to support the backup incumbent settings object
+ * stack.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
+ */
+ class V8_EXPORT V8_NODISCARD BackupIncumbentScope final {
+ public:
+ /**
+ * |backup_incumbent_context| is pushed onto the backup incumbent settings
+ * object stack.
+ */
+ explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
+ ~BackupIncumbentScope();
+
+ /**
+ * Returns address that is comparable with JS stack address. Note that JS
+ * stack may be allocated separately from the native stack. See also
+ * |TryCatch::JSStackComparableAddressPrivate| for details.
+ */
+ V8_DEPRECATE_SOON(
+ "This is private V8 information that should not be exposed in the API.")
+ uintptr_t JSStackComparableAddress() const {
+ return JSStackComparableAddressPrivate();
+ }
+
+ private:
+ friend class internal::Isolate;
+
+ uintptr_t JSStackComparableAddressPrivate() const {
+ return js_stack_comparable_address_;
+ }
+
+ Local<Context> backup_incumbent_context_;
+ uintptr_t js_stack_comparable_address_ = 0;
+ const BackupIncumbentScope* prev_ = nullptr;
+ };
+
+ V8_INLINE static Context* Cast(Data* data);
+
+ private:
+ friend class Value;
+ friend class Script;
+ friend class Object;
+ friend class Function;
+
+ static void CheckCast(Data* obj);
+
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
+ Local<Value> SlowGetEmbedderData(int index);
+ void* SlowGetAlignedPointerFromEmbedderData(int index);
+};
+
+// --- Implementation ---
+
+Local<Value> Context::GetEmbedderData(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A ctx = *reinterpret_cast<const A*>(this);
+ A embedder_data =
+ I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+ int value_offset =
+ I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+ A value = I::ReadRawField<A>(embedder_data, value_offset);
+#ifdef V8_COMPRESS_POINTERS
+ // We read the full pointer value and then decompress it in order to avoid
+  // dealing with potential endianness issues.
+ value =
+ I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+#endif
+ internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
+ *reinterpret_cast<A*>(this));
+ A* result = HandleScope::CreateHandle(isolate, value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+#else
+ return SlowGetEmbedderData(index);
+#endif
+}
+
+void* Context::GetAlignedPointerFromEmbedderData(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A ctx = *reinterpret_cast<const A*>(this);
+ A embedder_data =
+ I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+ int value_offset =
+ I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ value_offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
+ return reinterpret_cast<void*>(
+ I::ReadExternalPointerField(isolate, embedder_data, value_offset,
+ internal::kEmbedderDataSlotPayloadTag));
+#else
+ return SlowGetAlignedPointerFromEmbedderData(index);
+#endif
+}
+
+template <class T>
+MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
+}
+
+Context* Context::Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Context*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_CONTEXT_H_
diff --git a/chromium/v8/include/v8-cppgc.h b/chromium/v8/include/v8-cppgc.h
index 745fb04347e..813e0842fa7 100644
--- a/chromium/v8/include/v8-cppgc.h
+++ b/chromium/v8/include/v8-cppgc.h
@@ -14,8 +14,9 @@
#include "cppgc/heap-statistics.h"
#include "cppgc/internal/write-barrier.h"
#include "cppgc/visitor.h"
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
namespace cppgc {
class AllocationHandle;
@@ -24,6 +25,8 @@ class HeapHandle;
namespace v8 {
+class Object;
+
namespace internal {
class CppHeap;
} // namespace internal
diff --git a/chromium/v8/include/v8-data.h b/chromium/v8/include/v8-data.h
new file mode 100644
index 00000000000..dbd36c9a035
--- /dev/null
+++ b/chromium/v8/include/v8-data.h
@@ -0,0 +1,65 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DATA_H_
+#define INCLUDE_V8_DATA_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * The superclass of objects that can reside on V8's heap.
+ */
+class V8_EXPORT Data {
+ public:
+ /**
+ * Returns true if this data is a |v8::Value|.
+ */
+ bool IsValue() const;
+
+ /**
+ * Returns true if this data is a |v8::Module|.
+ */
+ bool IsModule() const;
+
+ /**
+ * Returns true if this data is a |v8::Private|.
+ */
+ bool IsPrivate() const;
+
+ /**
+ * Returns true if this data is a |v8::ObjectTemplate|.
+ */
+ bool IsObjectTemplate() const;
+
+ /**
+ * Returns true if this data is a |v8::FunctionTemplate|.
+ */
+ bool IsFunctionTemplate() const;
+
+ /**
+ * Returns true if this data is a |v8::Context|.
+ */
+ bool IsContext() const;
+
+ private:
+ Data();
+};
+
+/**
+ * A fixed-sized array with elements of type Data.
+ */
+class V8_EXPORT FixedArray : public Data {
+ public:
+ int Length() const;
+ Local<Data> Get(Local<Context> context, int i) const;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DATA_H_
diff --git a/chromium/v8/include/v8-date.h b/chromium/v8/include/v8-date.h
new file mode 100644
index 00000000000..e7a01f29b2d
--- /dev/null
+++ b/chromium/v8/include/v8-date.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DATE_H_
+#define INCLUDE_V8_DATE_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
+ */
+class V8_EXPORT Date : public Object {
+ public:
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Value> New(Local<Context> context,
+ double time);
+
+ /**
+ * A specialization of Value::NumberValue that is more efficient
+ * because we know the structure of this object.
+ */
+ double ValueOf() const;
+
+ V8_INLINE static Date* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Date*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DATE_H_
diff --git a/chromium/v8/include/v8-debug.h b/chromium/v8/include/v8-debug.h
new file mode 100644
index 00000000000..a13ae3f6d6c
--- /dev/null
+++ b/chromium/v8/include/v8-debug.h
@@ -0,0 +1,151 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DEBUG_H_
+#define INCLUDE_V8_DEBUG_H_
+
+#include <stdint.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+class String;
+
+/**
+ * A single JavaScript stack frame.
+ */
+class V8_EXPORT StackFrame {
+ public:
+ /**
+   * Returns the number, 1-based, of the line for the associated function call.
+ * This method will return Message::kNoLineNumberInfo if it is unable to
+ * retrieve the line number, or if kLineNumber was not passed as an option
+ * when capturing the StackTrace.
+ */
+ int GetLineNumber() const;
+
+ /**
+ * Returns the 1-based column offset on the line for the associated function
+ * call.
+ * This method will return Message::kNoColumnInfo if it is unable to retrieve
+ * the column number, or if kColumnOffset was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetColumn() const;
+
+ /**
+ * Returns the id of the script for the function for this StackFrame.
+ * This method will return Message::kNoScriptIdInfo if it is unable to
+ * retrieve the script id, or if kScriptId was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetScriptId() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+ * function for this StackFrame.
+ */
+ Local<String> GetScriptName() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+   * function for this StackFrame, or the sourceURL value if the script name
+   * is undefined and its source ends with a //# sourceURL=... string or the
+   * deprecated //@ sourceURL=... string.
+ */
+ Local<String> GetScriptNameOrSourceURL() const;
+
+ /**
+ * Returns the source of the script for the function for this StackFrame.
+ */
+ Local<String> GetScriptSource() const;
+
+ /**
+ * Returns the source mapping URL (if one is present) of the script for
+ * the function for this StackFrame.
+ */
+ Local<String> GetScriptSourceMappingURL() const;
+
+ /**
+ * Returns the name of the function associated with this stack frame.
+ */
+ Local<String> GetFunctionName() const;
+
+ /**
+ * Returns whether or not the associated function is compiled via a call to
+ * eval().
+ */
+ bool IsEval() const;
+
+ /**
+ * Returns whether or not the associated function is called as a
+ * constructor via "new".
+ */
+ bool IsConstructor() const;
+
+ /**
+   * Returns whether or not the associated function is defined in wasm.
+ */
+ bool IsWasm() const;
+
+ /**
+ * Returns whether or not the associated function is defined by the user.
+ */
+ bool IsUserJavaScript() const;
+};
+
+/**
+ * Representation of a JavaScript stack trace. The information collected is a
+ * snapshot of the execution stack and the information remains valid after
+ * execution continues.
+ */
+class V8_EXPORT StackTrace {
+ public:
+ /**
+   * Flags that determine what information is captured for each
+ * StackFrame when grabbing the current stack trace.
+ * Note: these options are deprecated and we always collect all available
+ * information (kDetailed).
+ */
+ enum StackTraceOptions {
+ kLineNumber = 1,
+ kColumnOffset = 1 << 1 | kLineNumber,
+ kScriptName = 1 << 2,
+ kFunctionName = 1 << 3,
+ kIsEval = 1 << 4,
+ kIsConstructor = 1 << 5,
+ kScriptNameOrSourceURL = 1 << 6,
+ kScriptId = 1 << 7,
+ kExposeFramesAcrossSecurityOrigins = 1 << 8,
+ kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
+ kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
+ };
+
+ /**
+ * Returns a StackFrame at a particular index.
+ */
+ Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
+
+ /**
+ * Returns the number of StackFrames.
+ */
+ int GetFrameCount() const;
+
+ /**
+ * Grab a snapshot of the current JavaScript execution stack.
+ *
+ * \param frame_limit The maximum number of stack frames we want to capture.
+ * \param options Enumerates the set of things we will capture for each
+ * StackFrame.
+ */
+ static Local<StackTrace> CurrentStackTrace(
+ Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed);
+};
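+
+// Usage sketch (illustrative, not part of this header): capturing the current
+// stack and reading the top frame's function name, assuming an active
+// HandleScope.
+//
+//   Local<StackTrace> trace =
+//       StackTrace::CurrentStackTrace(isolate, /*frame_limit=*/10);
+//   if (trace->GetFrameCount() > 0) {
+//     Local<String> name = trace->GetFrame(isolate, 0)->GetFunctionName();
+//   }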
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DEBUG_H_
diff --git a/chromium/v8/include/v8-embedder-heap.h b/chromium/v8/include/v8-embedder-heap.h
new file mode 100644
index 00000000000..501a4fc523b
--- /dev/null
+++ b/chromium/v8/include/v8-embedder-heap.h
@@ -0,0 +1,238 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EMBEDDER_HEAP_H_
+#define INCLUDE_V8_EMBEDDER_HEAP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "cppgc/common.h"
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Data;
+class Isolate;
+class Value;
+
+namespace internal {
+class LocalEmbedderHeapTracer;
+} // namespace internal
+
+/**
+ * Handler for embedder roots on non-unified heap garbage collections.
+ */
+class V8_EXPORT EmbedderRootsHandler {
+ public:
+ virtual ~EmbedderRootsHandler() = default;
+
+ /**
+ * Returns true if the TracedGlobal handle should be considered as root for
+ * the currently running non-tracing garbage collection and false otherwise.
+ * The default implementation will keep all TracedGlobal references as roots.
+ *
+ * If this returns false, then V8 may decide that the object referred to by
+ * such a handle is reclaimed. In that case:
+ * - No action is required if handles are used with destructors, i.e., by just
+ * using |TracedGlobal|.
+ * - When run without destructors, i.e., by using |TracedReference|, V8 calls
+ * |ResetRoot|.
+ *
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. The embedder may use |WrapperClassId()| to
+ * distinguish cases where it wants handles to be treated as roots from not
+ * being treated as roots.
+ */
+ virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+ virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
+
+ /**
+ * Used in combination with |IsRoot|. Called by V8 when an
+ * object that is backed by a handle is reclaimed by a non-tracing garbage
+ * collection. It is up to the embedder to reset the original handle.
+ *
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. It is up to the embedder to find the original
+ * handle via the object or class id.
+ */
+ virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+};
+
+/**
+ * Interface for tracing through the embedder heap. During a V8 garbage
+ * collection, V8 collects hidden fields of all potential wrappers, and at the
+ * end of its marking phase iterates the collection and asks the embedder to
+ * trace through its heap and use reporter to report each JavaScript object
+ * reachable from any of the given wrappers.
+ */
+class V8_EXPORT EmbedderHeapTracer {
+ public:
+ using EmbedderStackState = cppgc::EmbedderStackState;
+
+ enum TraceFlags : uint64_t {
+ kNoFlags = 0,
+ kReduceMemory = 1 << 0,
+ kForced = 1 << 2,
+ };
+
+ /**
+ * Interface for iterating through TracedGlobal handles.
+ */
+ class V8_EXPORT TracedGlobalHandleVisitor {
+ public:
+ virtual ~TracedGlobalHandleVisitor() = default;
+ virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
+ virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
+ };
+
+ /**
+ * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
+ * summary is reported.
+ */
+ struct TraceSummary {
+ /**
+ * Time spent managing the retained memory in milliseconds. This can e.g.
+ * include the time tracing through objects in the embedder.
+ */
+ double time = 0.0;
+
+ /**
+ * Memory retained by the embedder through the |EmbedderHeapTracer|
+ * mechanism in bytes.
+ */
+ size_t allocated_size = 0;
+ };
+
+ virtual ~EmbedderHeapTracer() = default;
+
+ /**
+ * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
+ * attached to.
+ */
+ void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
+
+ /**
+ * Called by the embedder to set the start of the stack which is e.g. used by
+ * V8 to determine whether handles are used from stack or heap.
+ */
+ void SetStackStart(void* stack_start);
+
+ /**
+ * Called by the embedder to notify V8 of an empty execution stack.
+ */
+ V8_DEPRECATE_SOON(
+      "This call only optimizes internal caches which V8 is able to figure out "
+ "on its own now.")
+ void NotifyEmptyEmbedderStack();
+
+ /**
+ * Called by v8 to register internal fields of found wrappers.
+ *
+ * The embedder is expected to store them somewhere and trace reachable
+ * wrappers from them when called through |AdvanceTracing|.
+ */
+ virtual void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) = 0;
+
+ void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
+
+ /**
+ * Called at the beginning of a GC cycle.
+ */
+ virtual void TracePrologue(TraceFlags flags) {}
+
+ /**
+ * Called to advance tracing in the embedder.
+ *
+ * The embedder is expected to trace its heap starting from wrappers reported
+ * by RegisterV8References method, and report back all reachable wrappers.
+ * Furthermore, the embedder is expected to stop tracing by the given
+ * deadline. A deadline of infinity means that tracing should be finished.
+ *
+ * Returns |true| if tracing is done, and false otherwise.
+ */
+ virtual bool AdvanceTracing(double deadline_in_ms) = 0;
+
+ /*
+   * Returns true if there is no more tracing work to be done (see
+   * AdvanceTracing) and false otherwise.
+ */
+ virtual bool IsTracingDone() = 0;
+
+ /**
+ * Called at the end of a GC cycle.
+ *
+ * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
+   * overridden to fill a |TraceSummary| that is used by V8 to schedule future
+ * garbage collections.
+ */
+ virtual void TraceEpilogue(TraceSummary* trace_summary) {}
+
+ /**
+ * Called upon entering the final marking pause. No more incremental marking
+ * steps will follow this call.
+ */
+ virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
+
+ /*
+ * Called by the embedder to request immediate finalization of the currently
+ * running tracing phase that has been started with TracePrologue and not
+ * yet finished with TraceEpilogue.
+ *
+ * Will be a noop when currently not in tracing.
+ *
+ * This is an experimental feature.
+ */
+ void FinalizeTracing();
+
+ /**
+ * See documentation on EmbedderRootsHandler.
+ */
+ virtual bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle);
+ virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
+
+ /**
+ * See documentation on EmbedderRootsHandler.
+ */
+ virtual void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle);
+
+ /*
+ * Called by the embedder to immediately perform a full garbage collection.
+ *
+ * Should only be used in testing code.
+ */
+ void GarbageCollectionForTesting(EmbedderStackState stack_state);
+
+ /*
+ * Called by the embedder to signal newly allocated or freed memory. Not bound
+   * to tracing phases. Embedders should trade off how often increments are
+   * reported, as V8 may consult global heuristics on whether to trigger
+   * garbage collection on this change.
+ */
+ void IncreaseAllocatedSize(size_t bytes);
+ void DecreaseAllocatedSize(size_t bytes);
+
+ /*
+   * Returns the v8::Isolate this tracer is attached to and |nullptr| if it
+ * is not attached to any v8::Isolate.
+ */
+ v8::Isolate* isolate() const { return isolate_; }
+
+ protected:
+ v8::Isolate* isolate_ = nullptr;
+
+ friend class internal::LocalEmbedderHeapTracer;
+};
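+
+// Usage sketch (illustrative, not part of this header): the shape of a
+// minimal tracer that overrides the pure virtual hooks. A real embedder would
+// mark its own heap starting from the reported wrapper fields instead of
+// simply dropping them.
+//
+//   class MyTracer final : public EmbedderHeapTracer {
+//    public:
+//     void RegisterV8References(
+//         const std::vector<std::pair<void*, void*>>& fields) final {
+//       wrappers_.insert(wrappers_.end(), fields.begin(), fields.end());
+//     }
+//     bool AdvanceTracing(double deadline_in_ms) final {
+//       wrappers_.clear();  // a real tracer would trace reachable objects
+//       return true;        // tracing is done
+//     }
+//     bool IsTracingDone() final { return wrappers_.empty(); }
+//     void EnterFinalPause(EmbedderStackState stack_state) final {}
+//
+//    private:
+//     std::vector<std::pair<void*, void*>> wrappers_;
+//   };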
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EMBEDDER_HEAP_H_
diff --git a/chromium/v8/include/v8-exception.h b/chromium/v8/include/v8-exception.h
new file mode 100644
index 00000000000..add882da4c4
--- /dev/null
+++ b/chromium/v8/include/v8-exception.h
@@ -0,0 +1,224 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXCEPTION_H_
+#define INCLUDE_V8_EXCEPTION_H_
+
+#include <stddef.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+class Message;
+class StackTrace;
+class String;
+class Value;
+
+namespace internal {
+class Isolate;
+class ThreadLocalTop;
+} // namespace internal
+
+/**
+ * Create new error objects by calling the corresponding error object
+ * constructor with the message.
+ */
+class V8_EXPORT Exception {
+ public:
+ static Local<Value> RangeError(Local<String> message);
+ static Local<Value> ReferenceError(Local<String> message);
+ static Local<Value> SyntaxError(Local<String> message);
+ static Local<Value> TypeError(Local<String> message);
+ static Local<Value> WasmCompileError(Local<String> message);
+ static Local<Value> WasmLinkError(Local<String> message);
+ static Local<Value> WasmRuntimeError(Local<String> message);
+ static Local<Value> Error(Local<String> message);
+
+ /**
+ * Creates an error message for the given exception.
+ * Will try to reconstruct the original stack trace from the exception value,
+ * or capture the current stack trace if not available.
+ */
+ static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
+
+ /**
+ * Returns the original stack trace that was captured at the creation time
+ * of a given exception, or an empty handle if not available.
+ */
+ static Local<StackTrace> GetStackTrace(Local<Value> exception);
+};
+
+/**
+ * An external exception handler.
+ */
+class V8_EXPORT TryCatch {
+ public:
+ /**
+ * Creates a new try/catch block and registers it with v8. Note that
+ * all TryCatch blocks should be stack allocated because the memory
+ * location itself is compared against JavaScript try/catch blocks.
+ */
+ explicit TryCatch(Isolate* isolate);
+
+ /**
+ * Unregisters and deletes this try/catch block.
+ */
+ ~TryCatch();
+
+ /**
+ * Returns true if an exception has been caught by this try/catch block.
+ */
+ bool HasCaught() const;
+
+ /**
+ * For certain types of exceptions, it makes no sense to continue execution.
+ *
+ * If CanContinue returns false, the correct action is to perform any C++
+ * cleanup needed and then return. If CanContinue returns false and
+ * HasTerminated returns true, it is possible to call
+ * CancelTerminateExecution in order to continue calling into the engine.
+ */
+ bool CanContinue() const;
+
+ /**
+ * Returns true if an exception has been caught due to script execution
+ * being terminated.
+ *
+ * There is no JavaScript representation of an execution termination
+ * exception. Such exceptions are thrown when the TerminateExecution
+ * methods are called to terminate a long-running script.
+ *
+ * If such an exception has been thrown, HasTerminated will return true,
+ * indicating that it is possible to call CancelTerminateExecution in order
+ * to continue calling into the engine.
+ */
+ bool HasTerminated() const;
+
+ /**
+ * Throws the exception caught by this TryCatch in a way that avoids
+ * it being caught again by this same TryCatch. As with ThrowException
+ * it is illegal to execute any JavaScript operations after calling
+ * ReThrow; the caller must return immediately to where the exception
+ * is caught.
+ */
+ Local<Value> ReThrow();
+
+ /**
+ * Returns the exception caught by this try/catch block. If no exception has
+ * been caught an empty handle is returned.
+ */
+ Local<Value> Exception() const;
+
+ /**
+ * Returns the .stack property of an object. If no .stack
+ * property is present an empty handle is returned.
+ */
+ V8_WARN_UNUSED_RESULT static MaybeLocal<Value> StackTrace(
+ Local<Context> context, Local<Value> exception);
+
+ /**
+ * Returns the .stack property of the thrown object. If no .stack property is
+ * present or if this try/catch block has not caught an exception, an empty
+ * handle is returned.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
+ Local<Context> context) const;
+
+ /**
+ * Returns the message associated with this exception. If there is
+ * no message associated an empty handle is returned.
+ */
+ Local<v8::Message> Message() const;
+
+ /**
+ * Clears any exceptions that may have been caught by this try/catch block.
+ * After this method has been called, HasCaught() will return false. Cancels
+ * the scheduled exception if it is caught and ReThrow() is not called before.
+ *
+ * It is not necessary to clear a try/catch block before using it again; if
+ * another exception is thrown the previously caught exception will just be
+ * overwritten. However, it is often a good idea since it makes it easier
+ * to determine which operation threw a given exception.
+ */
+ void Reset();
+
+ /**
+ * Set verbosity of the external exception handler.
+ *
+ * By default, exceptions that are caught by an external exception
+ * handler are not reported. Call SetVerbose with true on an
+ * external exception handler to have exceptions caught by the
+ * handler reported as if they were not caught.
+ */
+ void SetVerbose(bool value);
+
+ /**
+ * Returns true if verbosity is enabled.
+ */
+ bool IsVerbose() const;
+
+ /**
+ * Set whether or not this TryCatch should capture a Message object
+ * which holds source information about where the exception
+ * occurred. True by default.
+ */
+ void SetCaptureMessage(bool value);
+
+ V8_DEPRECATE_SOON(
+ "This is private information that should not be exposed by the API")
+ static void* JSStackComparableAddress(TryCatch* handler) {
+ if (handler == nullptr) return nullptr;
+ return reinterpret_cast<void*>(handler->JSStackComparableAddressPrivate());
+ }
+
+ TryCatch(const TryCatch&) = delete;
+ void operator=(const TryCatch&) = delete;
+
+ private:
+ // Declaring operator new and delete as deleted is not spec compliant.
+ // Therefore declare them private instead to disable dynamic alloc
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ /**
+ * There are cases when the raw address of C++ TryCatch object cannot be
+ * used for comparisons with addresses into the JS stack. The cases are:
+ * 1) ARM, ARM64 and MIPS simulators which have separate JS stack.
+ * 2) Address sanitizer allocates local C++ object in the heap when
+ * UseAfterReturn mode is enabled.
+ * This method returns address that can be used for comparisons with
+ * addresses into the JS stack. When neither simulator nor ASAN's
+ * UseAfterReturn is enabled, then the address returned will be the address
+ * of the C++ try catch handler itself.
+ */
+ internal::Address JSStackComparableAddressPrivate() {
+ return js_stack_comparable_address_;
+ }
+
+ void ResetInternal();
+
+ internal::Isolate* isolate_;
+ TryCatch* next_;
+ void* exception_;
+ void* message_obj_;
+ internal::Address js_stack_comparable_address_;
+ bool is_verbose_ : 1;
+ bool can_continue_ : 1;
+ bool capture_message_ : 1;
+ bool rethrow_ : 1;
+ bool has_terminated_ : 1;
+
+ friend class internal::Isolate;
+ friend class internal::ThreadLocalTop;
+};
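+
+// Usage sketch (illustrative, not part of this header): guarding a call into
+// JavaScript; `RunSomeScript` stands in for whatever embedder code may throw.
+//
+//   TryCatch try_catch(isolate);
+//   RunSomeScript(context);
+//   if (try_catch.HasCaught()) {
+//     Local<v8::Message> message = try_catch.Message();
+//     // Report |message| and try_catch.Exception(), then continue normally.
+//   }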
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXCEPTION_H_
diff --git a/chromium/v8/include/v8-extension.h b/chromium/v8/include/v8-extension.h
new file mode 100644
index 00000000000..0705e2afbb8
--- /dev/null
+++ b/chromium/v8/include/v8-extension.h
@@ -0,0 +1,62 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXTENSION_H_
+#define INCLUDE_V8_EXTENSION_H_
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class FunctionTemplate;
+
+// --- Extensions ---
+
+/**
+ * A native extension that can be registered with V8 and installed into newly
+ * created contexts.
+ */
+class V8_EXPORT Extension {
+ public:
+ // Note that the strings passed into this constructor must live as long
+ // as the Extension itself.
+ Extension(const char* name, const char* source = nullptr, int dep_count = 0,
+ const char** deps = nullptr, int source_length = -1);
+ virtual ~Extension() { delete source_; }
+ virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
+ Isolate* isolate, Local<String> name) {
+ return Local<FunctionTemplate>();
+ }
+
+ const char* name() const { return name_; }
+ size_t source_length() const { return source_length_; }
+ const String::ExternalOneByteStringResource* source() const {
+ return source_;
+ }
+ int dependency_count() const { return dep_count_; }
+ const char** dependencies() const { return deps_; }
+ void set_auto_enable(bool value) { auto_enable_ = value; }
+ bool auto_enable() { return auto_enable_; }
+
+ // Disallow copying and assigning.
+ Extension(const Extension&) = delete;
+ void operator=(const Extension&) = delete;
+
+ private:
+ const char* name_;
+  size_t source_length_;  // expected to be initialized before source_
+ String::ExternalOneByteStringResource* source_;
+ int dep_count_;
+ const char** deps_;
+ bool auto_enable_;
+};
+
+void V8_EXPORT RegisterExtension(std::unique_ptr<Extension>);
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXTENSION_H_
diff --git a/chromium/v8/include/v8-external.h b/chromium/v8/include/v8-external.h
new file mode 100644
index 00000000000..2e245036f42
--- /dev/null
+++ b/chromium/v8/include/v8-external.h
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXTERNAL_H_
+#define INCLUDE_V8_EXTERNAL_H_
+
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+/**
+ * A JavaScript value that wraps a C++ void*. This type of value is mainly used
+ * to associate C++ data structures with JavaScript objects.
+ */
+class V8_EXPORT External : public Value {
+ public:
+ static Local<External> New(Isolate* isolate, void* value);
+ V8_INLINE static External* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<External*>(value);
+ }
+
+ void* Value() const;
+
+ private:
+ static void CheckCast(v8::Value* obj);
+};
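+
+// Usage sketch (illustrative, not part of this header): attaching a C++
+// object to JavaScript and reading it back later; `MyState` is hypothetical
+// and its lifetime must be managed by the embedder.
+//
+//   MyState* state = new MyState();
+//   Local<External> wrapper = External::New(isolate, state);
+//   // ... later, given a Local<Value> known to hold the wrapper:
+//   MyState* back = static_cast<MyState*>(External::Cast(*value)->Value());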
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXTERNAL_H_
diff --git a/chromium/v8/include/v8-fast-api-calls.h b/chromium/v8/include/v8-fast-api-calls.h
index 8c9d02769ee..854f845aba6 100644
--- a/chromium/v8/include/v8-fast-api-calls.h
+++ b/chromium/v8/include/v8-fast-api-calls.h
@@ -225,9 +225,11 @@
#include <tuple>
#include <type_traits>
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-typed-array.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -275,6 +277,17 @@ class CTypeInfo {
Flags flags = Flags::kNone)
: type_(type), sequence_type_(sequence_type), flags_(flags) {}
+ typedef uint32_t Identifier;
+ explicit constexpr CTypeInfo(Identifier identifier)
+ : CTypeInfo(static_cast<Type>(identifier >> 16),
+ static_cast<SequenceType>((identifier >> 8) & 255),
+ static_cast<Flags>(identifier & 255)) {}
+ constexpr Identifier GetId() const {
+ return static_cast<uint8_t>(type_) << 16 |
+ static_cast<uint8_t>(sequence_type_) << 8 |
+ static_cast<uint8_t>(flags_);
+ }
+
constexpr Type GetType() const { return type_; }
constexpr SequenceType GetSequenceType() const { return sequence_type_; }
constexpr Flags GetFlags() const { return flags_; }
@@ -322,6 +335,14 @@ struct FastApiTypedArray : public FastApiTypedArrayBase {
return tmp;
}
+ bool getStorageIfAligned(T** elements) const {
+ if (reinterpret_cast<uintptr_t>(data_) % alignof(T) != 0) {
+ return false;
+ }
+ *elements = reinterpret_cast<T*>(data_);
+ return true;
+ }
+
private:
// This pointer should include the typed array offset applied.
// It's not guaranteed that it's aligned to sizeof(T), it's only
@@ -814,23 +835,54 @@ static constexpr CTypeInfo kTypeInfoFloat64 =
* returns true on success. `type_info` will be used for conversions.
*/
template <const CTypeInfo* type_info, typename T>
-bool V8_EXPORT V8_WARN_UNUSED_RESULT TryCopyAndConvertArrayToCppBuffer(
- Local<Array> src, T* dst, uint32_t max_length);
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
+bool V8_EXPORT V8_WARN_UNUSED_RESULT
+ TryCopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
+ uint32_t max_length);
template <>
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
inline bool V8_WARN_UNUSED_RESULT
-TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
- Local<Array> src, int32_t* dst, uint32_t max_length) {
- return CopyAndConvertArrayToCppBufferInt32(src, dst, max_length);
+ TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length) {
+ return false;
}
template <>
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
inline bool V8_WARN_UNUSED_RESULT
-TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
- Local<Array> src, double* dst, uint32_t max_length) {
- return CopyAndConvertArrayToCppBufferFloat64(src, dst, max_length);
+ TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
+ Local<Array> src, double* dst, uint32_t max_length) {
+ return false;
}
+template <CTypeInfo::Identifier type_info_id, typename T>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer(
+ Local<Array> src, T* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<uint32_t>::Build().GetId(), uint32_t>(
+ Local<Array> src, uint32_t* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<float>::Build().GetId(), float>(
+ Local<Array> src, float* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<double>::Build().GetId(), double>(
+ Local<Array> src, double* dst, uint32_t max_length);
+
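+// Usage sketch (illustrative, not part of this header): copying a JS array of
+// numbers into a fixed-size C++ buffer using the id-based overload declared
+// above; `array` is assumed to be a Local<Array> holding int32 values.
+//
+//   int32_t buffer[16];
+//   bool ok = TryToCopyAndConvertArrayToCppBuffer<
+//       internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
+//       array, buffer, 16);
+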
} // namespace v8
#endif // INCLUDE_V8_FAST_API_CALLS_H_
diff --git a/chromium/v8/include/v8-forward.h b/chromium/v8/include/v8-forward.h
new file mode 100644
index 00000000000..db3a2017b7e
--- /dev/null
+++ b/chromium/v8/include/v8-forward.h
@@ -0,0 +1,81 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FORWARD_H_
+#define INCLUDE_V8_FORWARD_H_
+
+// This header is intended to be used by headers that pass around V8 types,
+// either by pointer or using Local<Type>. The full definitions can be included
+// either via v8.h or the more fine-grained headers.
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class AccessorSignature;
+class Array;
+class ArrayBuffer;
+class ArrayBufferView;
+class BigInt;
+class BigInt64Array;
+class BigIntObject;
+class BigUint64Array;
+class Boolean;
+class BooleanObject;
+class Context;
+class DataView;
+class Data;
+class Date;
+class Extension;
+class External;
+class FixedArray;
+class Float32Array;
+class Float64Array;
+class Function;
+template <class F>
+class FunctionCallbackInfo;
+class FunctionTemplate;
+class Int16Array;
+class Int32;
+class Int32Array;
+class Int8Array;
+class Integer;
+class Isolate;
+class Map;
+class Module;
+class Name;
+class Number;
+class NumberObject;
+class Object;
+class ObjectTemplate;
+class Platform;
+class Primitive;
+class Private;
+class Promise;
+class Proxy;
+class RegExp;
+class Script;
+class Set;
+class SharedArrayBuffer;
+class Signature;
+class String;
+class StringObject;
+class Symbol;
+class SymbolObject;
+class Template;
+class TryCatch;
+class TypedArray;
+class Uint16Array;
+class Uint32;
+class Uint32Array;
+class Uint8Array;
+class Uint8ClampedArray;
+class UnboundModuleScript;
+class Value;
+class WasmMemoryObject;
+class WasmModuleObject;
+
+} // namespace v8
+
+#endif // INCLUDE_V8_FORWARD_H_
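
As the header comment above notes, v8-forward.h (together with v8-local-handle.h, which it includes) is sufficient for interfaces that only pass V8 types around. A sketch of a hypothetical embedder header (the file and class names are illustrative, not part of the patch):

// my-bindings.h -- only forward declarations of the V8 types are needed here.
#include "v8-forward.h"  // NOLINT(build/include_directory)

class MyBindings {
 public:
  virtual ~MyBindings() = default;
  // Local<> and raw pointers do not require the full class definitions.
  virtual void OnValue(v8::Isolate* isolate, v8::Local<v8::Value> value) = 0;
};
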
diff --git a/chromium/v8/include/v8-function-callback.h b/chromium/v8/include/v8-function-callback.h
new file mode 100644
index 00000000000..2adff99b1cb
--- /dev/null
+++ b/chromium/v8/include/v8-function-callback.h
@@ -0,0 +1,475 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FUNCTION_CALLBACK_H_
+#define INCLUDE_V8_FUNCTION_CALLBACK_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+template <typename T>
+class BasicTracedReference;
+template <typename T>
+class Global;
+class Object;
+class Value;
+
+namespace internal {
+class FunctionCallbackArguments;
+class PropertyCallbackArguments;
+} // namespace internal
+
+namespace debug {
+class ConsoleCallArguments;
+} // namespace debug
+
+template <typename T>
+class ReturnValue {
+ public:
+ template <class S>
+ V8_INLINE ReturnValue(const ReturnValue<S>& that) : value_(that.value_) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ // Local setters
+ template <typename S>
+ V8_INLINE void Set(const Global<S>& handle);
+ template <typename S>
+ V8_INLINE void Set(const BasicTracedReference<S>& handle);
+ template <typename S>
+ V8_INLINE void Set(const Local<S> handle);
+ // Fast primitive setters
+ V8_INLINE void Set(bool value);
+ V8_INLINE void Set(double i);
+ V8_INLINE void Set(int32_t i);
+ V8_INLINE void Set(uint32_t i);
+ // Fast JS primitive setters
+ V8_INLINE void SetNull();
+ V8_INLINE void SetUndefined();
+ V8_INLINE void SetEmptyString();
+ // Convenience getter for Isolate
+ V8_INLINE Isolate* GetIsolate() const;
+
+ // Pointer setter: Uncompilable to prevent inadvertent misuse.
+ template <typename S>
+ V8_INLINE void Set(S* whatever);
+
+ // Getter. Creates a new Local<> so it comes with a certain performance
+ // hit. If the ReturnValue was not yet set, this will return the undefined
+ // value.
+ V8_INLINE Local<Value> Get() const;
+
+ private:
+ template <class F>
+ friend class ReturnValue;
+ template <class F>
+ friend class FunctionCallbackInfo;
+ template <class F>
+ friend class PropertyCallbackInfo;
+ template <class F, class G, class H>
+ friend class PersistentValueMapBase;
+ V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
+ V8_INLINE internal::Address GetDefaultValue();
+ V8_INLINE explicit ReturnValue(internal::Address* slot);
+ internal::Address* value_;
+};
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
+ */
+template <typename T>
+class FunctionCallbackInfo {
+ public:
+ /** The number of available arguments. */
+ V8_INLINE int Length() const;
+ /**
+ * Accessor for the available arguments. Returns `undefined` if the index
+ * is out of bounds.
+ */
+ V8_INLINE Local<Value> operator[](int i) const;
+ /** Returns the receiver. This corresponds to the "this" value. */
+ V8_INLINE Local<Object> This() const;
+ /**
+ * If the callback was created without a Signature, this is the same
+ * value as This(). If there is a signature, and the signature didn't match
+ * This() but one of its hidden prototypes, this will be the respective
+ * hidden prototype.
+ *
+ * Note that this is not the prototype of This() on which the accessor
+ * referencing this callback was found (which in V8 internally is often
+ * referred to as holder [sic]).
+ */
+ V8_INLINE Local<Object> Holder() const;
+ /** For construct calls, this returns the "new.target" value. */
+ V8_INLINE Local<Value> NewTarget() const;
+ /** Indicates whether this is a regular call or a construct call. */
+ V8_INLINE bool IsConstructCall() const;
+ /** The data argument specified when creating the callback. */
+ V8_INLINE Local<Value> Data() const;
+ /** The current Isolate. */
+ V8_INLINE Isolate* GetIsolate() const;
+ /** The ReturnValue for the call. */
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 6;
+
+ protected:
+ friend class internal::FunctionCallbackArguments;
+ friend class internal::CustomArguments<FunctionCallbackInfo>;
+ friend class debug::ConsoleCallArguments;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kNewTargetIndex = 5;
+
+ V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values, int length);
+ internal::Address* implicit_args_;
+ internal::Address* values_;
+ int length_;
+};
+
+/**
+ * The information passed to a property callback about the context
+ * of the property access.
+ */
+template <typename T>
+class PropertyCallbackInfo {
+ public:
+ /**
+ * \return The isolate of the property access.
+ */
+ V8_INLINE Isolate* GetIsolate() const;
+
+ /**
+ * \return The data set in the configuration, i.e., in
+ * `NamedPropertyHandlerConfiguration` or
+ * `IndexedPropertyHandlerConfiguration.`
+ */
+ V8_INLINE Local<Value> Data() const;
+
+ /**
+ * \return The receiver. In many cases, this is the object on which the
+ * property access was intercepted. When using
+ * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
+ * object passed in as receiver or thisArg.
+ *
+ * \code
+ * void GetterCallback(Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * auto context = info.GetIsolate()->GetCurrentContext();
+ *
+ * v8::Local<v8::Value> a_this =
+ * info.This()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ * v8::Local<v8::Value> a_holder =
+ * info.Holder()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ *
+ * CHECK(v8_str("r")->Equals(context, a_this).FromJust());
+ * CHECK(v8_str("obj")->Equals(context, a_holder).FromJust());
+ *
+ * info.GetReturnValue().Set(name);
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ *
+ * CompileRun("obj.a = 'obj'; var r = {a: 'r'}; Reflect.get(obj, 'x', r)");
+ * \endcode
+ */
+ V8_INLINE Local<Object> This() const;
+
+ /**
+ * \return The object in the prototype chain of the receiver that has the
+ * interceptor. Suppose you have `x` and its prototype is `y`, and `y`
+ * has an interceptor. Then `info.This()` is `x` and `info.Holder()` is `y`.
+ * The Holder() could be a hidden object (the global object, rather
+ * than the global proxy).
+ *
+ * \note For security reasons, do not pass the object back into the runtime.
+ */
+ V8_INLINE Local<Object> Holder() const;
+
+ /**
+ * \return The return value of the callback.
+ * Can be changed by calling Set().
+ * \code
+ * info.GetReturnValue().Set(...)
+ * \endcode
+ *
+ */
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+
+ /**
+ * \return True if the intercepted function should throw if an error occurs.
+ * Usually, `true` corresponds to `'use strict'`.
+ *
+ * \note Always `false` when intercepting `Reflect.set()`
+ * independent of the language mode.
+ */
+ V8_INLINE bool ShouldThrowOnError() const;
+
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 7;
+
+ protected:
+ friend class MacroAssembler;
+ friend class internal::PropertyCallbackArguments;
+ friend class internal::CustomArguments<PropertyCallbackInfo>;
+ static const int kShouldThrowOnErrorIndex = 0;
+ static const int kHolderIndex = 1;
+ static const int kIsolateIndex = 2;
+ static const int kReturnValueDefaultValueIndex = 3;
+ static const int kReturnValueIndex = 4;
+ static const int kDataIndex = 5;
+ static const int kThisIndex = 6;
+
+ V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
+ internal::Address* args_;
+};
+
+using FunctionCallback = void (*)(const FunctionCallbackInfo<Value>& info);
+
+// --- Implementation ---
+
+template <typename T>
+ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const Global<S>& handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ }
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const BasicTracedReference<S>& handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(handle.val_);
+ }
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const Local<S> handle) {
+ static_assert(std::is_void<T>::value || std::is_base_of<T, S>::value,
+ "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ }
+}
+
+template <typename T>
+void ReturnValue<T>::Set(double i) {
+ static_assert(std::is_base_of<T, Number>::value, "type check");
+ Set(Number::New(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(int32_t i) {
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
+ using I = internal::Internals;
+ if (V8_LIKELY(I::IsValidSmi(i))) {
+ *value_ = I::IntToSmi(i);
+ return;
+ }
+ Set(Integer::New(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(uint32_t i) {
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
+ // The value fits into a non-negative int32_t if and only if the top bit is
+ // clear, so test that bit directly.
+ bool fits_into_int32_t = (i & (1U << 31)) == 0;
+ if (V8_LIKELY(fits_into_int32_t)) {
+ Set(static_cast<int32_t>(i));
+ return;
+ }
+ Set(Integer::NewFromUnsigned(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(bool value) {
+ static_assert(std::is_base_of<T, Boolean>::value, "type check");
+ using I = internal::Internals;
+ int root_index;
+ if (value) {
+ root_index = I::kTrueValueRootIndex;
+ } else {
+ root_index = I::kFalseValueRootIndex;
+ }
+ *value_ = *I::GetRoot(GetIsolate(), root_index);
+}
+
+template <typename T>
+void ReturnValue<T>::SetNull() {
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
+}
+
+template <typename T>
+void ReturnValue<T>::SetUndefined() {
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
+}
+
+template <typename T>
+void ReturnValue<T>::SetEmptyString() {
+ static_assert(std::is_base_of<T, String>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
+}
+
+template <typename T>
+Isolate* ReturnValue<T>::GetIsolate() const {
+ // Isolate is always the pointer below the default value on the stack.
+ return *reinterpret_cast<Isolate**>(&value_[-2]);
+}
+
+template <typename T>
+Local<Value> ReturnValue<T>::Get() const {
+ using I = internal::Internals;
+ if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
+ return Local<Value>(*Undefined(GetIsolate()));
+ return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(S* whatever) {
+ static_assert(sizeof(S) < 0, "uncompilable to prevent inadvertent misuse");
+}
+
+template <typename T>
+internal::Address ReturnValue<T>::GetDefaultValue() {
+ // Default value is always the pointer below value_ on the stack.
+ return value_[-1];
+}
+
+template <typename T>
+FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values,
+ int length)
+ : implicit_args_(implicit_args), values_(values), length_(length) {}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
+ // values_ points to the first argument (not the receiver).
+ if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
+ return Local<Value>(reinterpret_cast<Value*>(values_ + i));
+}
+
+template <typename T>
+Local<Object> FunctionCallbackInfo<T>::This() const {
+ // values_ points to the first argument (not the receiver).
+ return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
+}
+
+template <typename T>
+Local<Object> FunctionCallbackInfo<T>::Holder() const {
+ return Local<Object>(
+ reinterpret_cast<Object*>(&implicit_args_[kHolderIndex]));
+}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
+ return Local<Value>(
+ reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
+}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
+}
+
+template <typename T>
+Isolate* FunctionCallbackInfo<T>::GetIsolate() const {
+ return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
+}
+
+template <typename T>
+ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&implicit_args_[kReturnValueIndex]);
+}
+
+template <typename T>
+bool FunctionCallbackInfo<T>::IsConstructCall() const {
+ return !NewTarget()->IsUndefined();
+}
+
+template <typename T>
+int FunctionCallbackInfo<T>::Length() const {
+ return length_;
+}
+
+template <typename T>
+Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
+ return *reinterpret_cast<Isolate**>(&args_[kIsolateIndex]);
+}
+
+template <typename T>
+Local<Value> PropertyCallbackInfo<T>::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
+}
+
+template <typename T>
+Local<Object> PropertyCallbackInfo<T>::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
+}
+
+template <typename T>
+Local<Object> PropertyCallbackInfo<T>::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
+}
+
+template <typename T>
+ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&args_[kReturnValueIndex]);
+}
+
+template <typename T>
+bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
+ using I = internal::Internals;
+ if (args_[kShouldThrowOnErrorIndex] !=
+ I::IntToSmi(I::kInferShouldThrowMode)) {
+ return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(I::kDontThrow);
+ }
+ return v8::internal::ShouldThrowOnError(
+ reinterpret_cast<v8::internal::Isolate*>(GetIsolate()));
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_FUNCTION_CALLBACK_H_
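
A minimal sketch of a callback matching the FunctionCallback signature defined above (not part of the patch; assumes the context and value headers are also included, and the name SumCallback is illustrative):

// Sums the numeric arguments and reports the result through the ReturnValue.
void SumCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  double sum = 0;
  for (int i = 0; i < info.Length(); ++i) {
    double value;
    // NumberValue() yields a Maybe<double>; skip arguments that fail to convert.
    if (info[i]->NumberValue(context).To(&value)) sum += value;
  }
  // Set(double) boxes the result as a v8::Number.
  info.GetReturnValue().Set(sum);
}
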
diff --git a/chromium/v8/include/v8-function.h b/chromium/v8/include/v8-function.h
new file mode 100644
index 00000000000..9424a86fdaf
--- /dev/null
+++ b/chromium/v8/include/v8-function.h
@@ -0,0 +1,122 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FUNCTION_H_
+#define INCLUDE_V8_FUNCTION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8-template.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * A JavaScript function object (ECMA-262, 15.3).
+ */
+class V8_EXPORT Function : public Object {
+ public:
+ /**
+ * Create a function in the current execution context
+ * for a given FunctionCallback.
+ */
+ static MaybeLocal<Function> New(
+ Local<Context> context, FunctionCallback callback,
+ Local<Value> data = Local<Value>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
+ Local<Context> context, int argc, Local<Value> argv[]) const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
+ Local<Context> context) const {
+ return NewInstance(context, 0, nullptr);
+ }
+
+ /**
+ * When side effect checks are enabled, passing kHasNoSideEffect allows the
+ * constructor to be invoked without throwing. Calls made within the
+ * constructor are still checked.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstanceWithSideEffectType(
+ Local<Context> context, int argc, Local<Value> argv[],
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
+ Local<Value> recv, int argc,
+ Local<Value> argv[]);
+
+ void SetName(Local<String> name);
+ Local<Value> GetName() const;
+
+ /**
+ * Name inferred from variable or property assignment of this function.
+ * Used to facilitate debugging and profiling of JavaScript code written
+ * in an OO style, where many functions are anonymous but are assigned
+ * to object properties.
+ */
+ Local<Value> GetInferredName() const;
+
+ /**
+ * displayName if it is set, otherwise name if it is configured, otherwise
+ * function name, otherwise inferred name.
+ */
+ Local<Value> GetDebugName() const;
+
+ /**
+ * Returns the zero-based line number of the function body, or
+ * kLineOffsetNotFound if no information is available.
+ */
+ int GetScriptLineNumber() const;
+ /**
+ * Returns the zero-based column number of the function body, or
+ * kLineOffsetNotFound if no information is available.
+ */
+ int GetScriptColumnNumber() const;
+
+ /**
+ * Returns scriptId.
+ */
+ int ScriptId() const;
+
+ /**
+ * Returns the original function if this function is bound, else returns
+ * v8::Undefined.
+ */
+ Local<Value> GetBoundFunction() const;
+
+ /**
+ * Calls builtin Function.prototype.toString on this function.
+ * This is different from Value::ToString(), which may call a user-defined
+ * toString() function, and from Object::ObjectProtoToString(), which always
+ * serializes to "[object Function]".
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> FunctionProtoToString(
+ Local<Context> context);
+
+ ScriptOrigin GetScriptOrigin() const;
+ V8_INLINE static Function* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Function*>(value);
+ }
+
+ static const int kLineOffsetNotFound;
+
+ private:
+ Function();
+ static void CheckCast(Value* obj);
+};
+} // namespace v8
+
+#endif // INCLUDE_V8_FUNCTION_H_
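
A short sketch of how the Function API above is typically used (not part of the patch; SumCallback stands for any v8::FunctionCallback, such as the one sketched after v8-function-callback.h, and context is assumed to be a current context):

v8::MaybeLocal<v8::Value> CallSum(v8::Local<v8::Context> context) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::Function> fn;
  if (!v8::Function::New(context, SumCallback).ToLocal(&fn)) return {};
  v8::Local<v8::Value> args[] = {v8::Number::New(isolate, 1),
                                 v8::Number::New(isolate, 2)};
  // Call() returns an empty MaybeLocal if the callee threw.
  return fn->Call(context, v8::Undefined(isolate), 2, args);
}
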
diff --git a/chromium/v8/include/v8-initialization.h b/chromium/v8/include/v8-initialization.h
new file mode 100644
index 00000000000..7c9f26b8927
--- /dev/null
+++ b/chromium/v8/include/v8-initialization.h
@@ -0,0 +1,282 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_INITIALIZATION_H_
+#define INCLUDE_V8_INITIALIZATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+// We reserve the V8_* prefix for macros defined in V8 public API and
+// assume there are no name conflicts with the embedder's code.
+
+/**
+ * The v8 JavaScript engine.
+ */
+namespace v8 {
+
+class PageAllocator;
+class Platform;
+template <class K, class V, class T>
+class PersistentValueMapBase;
+
+/**
+ * EntropySource is used as a callback function when v8 needs a source
+ * of entropy.
+ */
+using EntropySource = bool (*)(unsigned char* buffer, size_t length);
+
+/**
+ * ReturnAddressLocationResolver is used as a callback function when v8 is
+ * resolving the location of a return address on the stack. Profilers that
+ * change the return address on the stack can use this to resolve the stack
+ * location to wherever the profiler stashed the original return address.
+ *
+ * \param return_addr_location A location on stack where a machine
+ * return address resides.
+ * \returns Either return_addr_location, or else a pointer to the profiler's
+ * copy of the original return address.
+ *
+ * \note The resolver function must not cause garbage collection.
+ */
+using ReturnAddressLocationResolver =
+ uintptr_t (*)(uintptr_t return_addr_location);
+
+using DcheckErrorCallback = void (*)(const char* file, int line,
+ const char* message);
+
+/**
+ * Container class for static utility functions.
+ */
+class V8_EXPORT V8 {
+ public:
+ /**
+ * Hand startup data to V8, in case the embedder has chosen to build
+ * V8 with external startup data.
+ *
+ * Note:
+ * - By default the startup data is linked into the V8 library, in which
+ * case this function is not meaningful.
+ * - If this needs to be called, it needs to be called before V8
+ * tries to make use of its built-ins.
+ * - To avoid unnecessary copies of data, V8 will point directly into the
+ * given data blob, so pretty please keep it around until V8 exit.
+ * - Compression of the startup blob might be useful, but needs to be
+ * handled entirely on the embedders' side.
+ * - The call will abort if the data is invalid.
+ */
+ static void SetSnapshotDataBlob(StartupData* startup_blob);
+
+ /** Set the callback to invoke in case of Dcheck failures. */
+ static void SetDcheckErrorHandler(DcheckErrorCallback that);
+
+ /**
+ * Sets V8 flags from a string.
+ */
+ static void SetFlagsFromString(const char* str);
+ static void SetFlagsFromString(const char* str, size_t length);
+
+ /**
+ * Sets V8 flags from the command line.
+ */
+ static void SetFlagsFromCommandLine(int* argc, char** argv,
+ bool remove_flags);
+
+ /** Get the version string. */
+ static const char* GetVersion();
+
+ /**
+ * Initializes V8. This function needs to be called before the first Isolate
+ * is created. It always returns true.
+ */
+ V8_INLINE static bool Initialize() {
+ const int kBuildConfiguration =
+ (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
+ (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
+ (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) |
+ (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0);
+ return Initialize(kBuildConfiguration);
+ }
+
+ /**
+ * Allows the host application to provide a callback which can be used
+ * as a source of entropy for random number generators.
+ */
+ static void SetEntropySource(EntropySource source);
+
+ /**
+ * Allows the host application to provide a callback that allows v8 to
+ * cooperate with a profiler that rewrites return addresses on the stack.
+ */
+ static void SetReturnAddressLocationResolver(
+ ReturnAddressLocationResolver return_address_resolver);
+
+ /**
+ * Releases any resources used by v8 and stops any utility threads
+ * that may be running. Note that disposing v8 is permanent, it
+ * cannot be reinitialized.
+ *
+ * It should generally not be necessary to dispose of v8 before exiting
+ * a process; this happens automatically. It is only necessary if the
+ * process needs to reclaim the resources taken up by v8.
+ */
+ static bool Dispose();
+
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. Returns true on success.
+ *
+ * If V8 was compiled with the ICU data in an external file, the location
+ * of the data file has to be provided.
+ */
+ static bool InitializeICU(const char* icu_data_file = nullptr);
+
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. If V8 was compiled with
+ * the ICU data in an external file and when the default location of that
+ * file should be used, a path to the executable must be provided.
+ * Returns true on success.
+ *
+ * The default is a file called icudtl.dat side-by-side with the executable.
+ *
+ * Optionally, the location of the data file can be provided to override the
+ * default.
+ */
+ static bool InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file = nullptr);
+
+ /**
+ * Initialize the external startup data. The embedder only needs to
+ * invoke this method when external startup data was enabled in a build.
+ *
+ * If V8 was compiled with the startup data in an external file, then
+ * V8 needs to be given those external files during startup. There are
+ * three ways to do this:
+ * - InitializeExternalStartupData(const char*)
+ * This will look in the given directory for the file "snapshot_blob.bin".
+ * - InitializeExternalStartupDataFromFile(const char*)
+ * As above, but will directly use the given file name.
+ * - Call SetSnapshotDataBlob.
+ * This will read the blobs from the given data structure and will
+ * not perform any file IO.
+ */
+ static void InitializeExternalStartupData(const char* directory_path);
+ static void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
+
+ /**
+ * Sets the v8::Platform to use. This should be invoked before V8 is
+ * initialized.
+ */
+ static void InitializePlatform(Platform* platform);
+
+ /**
+ * Clears all references to the v8::Platform. This should be invoked after
+ * V8 was disposed.
+ */
+ static void ShutdownPlatform();
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ //
+ // Virtual Memory Cage related API.
+ //
+ // This API is not yet stable and subject to changes in the future.
+ //
+
+ /**
+ * Initializes the virtual memory cage for V8.
+ *
+ * This must be invoked after the platform was initialized but before V8 is
+ * initialized. The virtual memory cage is torn down during platform shutdown.
+ * Returns true on success, false otherwise.
+ *
+ * TODO(saelo) Once it is no longer optional to create the virtual memory
+ * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization
+ * will likely happen as part of V8::Initialize, at which point this function
+ * should be removed.
+ */
+ static bool InitializeVirtualMemoryCage();
+
+ /**
+ * Provides access to the virtual memory cage page allocator.
+ *
+ * This allocator allocates pages inside the virtual memory cage. It can for
+ * example be used to obtain virtual memory for ArrayBuffer backing stores,
+ * which must be located inside the cage.
+ *
+ * It should be assumed that an attacker can corrupt data inside the cage,
+ * and so in particular the contents of pages returned by this allocator,
+ * arbitrarily and concurrently. Due to this, it is recommended to only
+ * place pure data buffers in pages obtained through this allocator.
+ *
+ * This function must only be called after initializing the virtual memory
+ * cage and V8.
+ */
+ static PageAllocator* GetVirtualMemoryCagePageAllocator();
+
+ /**
+ * Returns the size of the virtual memory cage in bytes.
+ *
+ * If the cage has not been initialized, or if the initialization failed,
+ * this returns zero.
+ */
+ static size_t GetVirtualMemoryCageSizeInBytes();
+#endif
+
+ /**
+ * Activate trap-based bounds checking for WebAssembly.
+ *
+ * \param use_v8_signal_handler Whether V8 should install its own signal
+ * handler or rely on the embedder's.
+ */
+ static bool EnableWebAssemblyTrapHandler(bool use_v8_signal_handler);
+
+#if defined(V8_OS_WIN)
+ /**
+ * On Win64, by default V8 does not emit unwinding data for jitted code,
+ * which means the OS cannot walk the stack frames and the system Structured
+ * Exception Handling (SEH) cannot unwind through V8-generated code:
+ * https://code.google.com/p/v8/issues/detail?id=3598.
+ *
+ * This function allows embedders to register a custom exception handler for
+ * exceptions in V8-generated code.
+ */
+ static void SetUnhandledExceptionCallback(
+ UnhandledExceptionCallback unhandled_exception_callback);
+#endif
+
+ /**
+ * Get statistics about the shared memory usage.
+ */
+ static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
+
+ private:
+ V8();
+
+ enum BuildConfigurationFeatures {
+ kPointerCompression = 1 << 0,
+ k31BitSmis = 1 << 1,
+ kHeapSandbox = 1 << 2,
+ kVirtualMemoryCage = 1 << 3,
+ };
+
+ /**
+ * Checks that the embedder build configuration is compatible with
+ * the V8 binary and if so initializes V8.
+ */
+ static bool Initialize(int build_config);
+
+ friend class Context;
+ template <class K, class V, class T>
+ friend class PersistentValueMapBase;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_INITIALIZATION_H_
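
The documentation above implies an ordering: the platform (and, if used, ICU and external startup data) must be set up before V8::Initialize(). A minimal startup sketch, assuming the embedder also links libplatform for NewDefaultPlatform(), which is not declared in this header:

#include <memory>
#include "libplatform/libplatform.h"
#include "v8-initialization.h"

void StartV8(const char* exec_path, const char* startup_data_dir) {
  // Only needed for builds using bundled ICU / external startup data.
  v8::V8::InitializeICUDefaultLocation(exec_path);
  v8::V8::InitializeExternalStartupData(startup_data_dir);
  static std::unique_ptr<v8::Platform> platform =
      v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
}
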
diff --git a/chromium/v8/include/v8-inspector.h b/chromium/v8/include/v8-inspector.h
index e6621ccd75c..74592fdf573 100644
--- a/chromium/v8/include/v8-inspector.h
+++ b/chromium/v8/include/v8-inspector.h
@@ -6,12 +6,20 @@
#define V8_V8_INSPECTOR_H_
#include <stdint.h>
-#include <cctype>
+#include <cctype>
#include <memory>
-#include <unordered_map>
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+class Context;
+class Name;
+class Object;
+class StackTrace;
+class Value;
+} // namespace v8
namespace v8_inspector {
@@ -320,24 +328,6 @@ class V8_EXPORT V8Inspector {
virtual std::unique_ptr<V8StackTrace> createStackTrace(
v8::Local<v8::StackTrace>) = 0;
virtual std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) = 0;
-
- // Performance counters.
- class V8_EXPORT Counters : public std::enable_shared_from_this<Counters> {
- public:
- explicit Counters(v8::Isolate* isolate);
- ~Counters();
- const std::unordered_map<std::string, int>& getCountersMap() const {
- return m_countersMap;
- }
-
- private:
- static int* getCounterPtr(const char* name);
-
- v8::Isolate* m_isolate;
- std::unordered_map<std::string, int> m_countersMap;
- };
-
- virtual std::shared_ptr<Counters> enableCounters() = 0;
};
} // namespace v8_inspector
diff --git a/chromium/v8/include/v8-internal.h b/chromium/v8/include/v8-internal.h
index 0222ab2f7e1..e1aee508bbc 100644
--- a/chromium/v8/include/v8-internal.h
+++ b/chromium/v8/include/v8-internal.h
@@ -141,15 +141,12 @@ using ExternalPointer_t = Address;
// the same time.
enum ExternalPointerTag : uint64_t {
kExternalPointerNullTag = 0x0000000000000000,
- kArrayBufferBackingStoreTag = 0x00ff000000000000, // 0b000000011111111
- kTypedArrayExternalPointerTag = 0x017f000000000000, // 0b000000101111111
- kDataViewDataPointerTag = 0x01bf000000000000, // 0b000000110111111
- kExternalStringResourceTag = 0x01df000000000000, // 0b000000111011111
- kExternalStringResourceDataTag = 0x01ef000000000000, // 0b000000111101111
- kForeignForeignAddressTag = 0x01f7000000000000, // 0b000000111110111
- kNativeContextMicrotaskQueueTag = 0x01fb000000000000, // 0b000000111111011
- kEmbedderDataSlotPayloadTag = 0x01fd000000000000, // 0b000000111111101
- kCodeEntryPointTag = 0x01fe000000000000, // 0b000000111111110
+ kExternalStringResourceTag = 0x00ff000000000000, // 0b000000011111111
+ kExternalStringResourceDataTag = 0x017f000000000000, // 0b000000101111111
+ kForeignForeignAddressTag = 0x01bf000000000000, // 0b000000110111111
+ kNativeContextMicrotaskQueueTag = 0x01df000000000000, // 0b000000111011111
+ kEmbedderDataSlotPayloadTag = 0x01ef000000000000, // 0b000000111101111
+ kCodeEntryPointTag = 0x01f7000000000000, // 0b000000111110111
};
constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
@@ -227,23 +224,30 @@ class Internals {
static const int kExternalOneByteRepresentationTag = 0x0a;
static const uint32_t kNumIsolateDataSlots = 4;
+ static const int kStackGuardSize = 7 * kApiSystemPointerSize;
+ static const int kBuiltinTier0EntryTableSize = 13 * kApiSystemPointerSize;
+ static const int kBuiltinTier0TableSize = 13 * kApiSystemPointerSize;
// IsolateData layout guarantees.
- static const int kIsolateEmbedderDataOffset = 0;
+ static const int kIsolateCageBaseOffset = 0;
+ static const int kIsolateStackGuardOffset =
+ kIsolateCageBaseOffset + kApiSystemPointerSize;
+ static const int kBuiltinTier0EntryTableOffset =
+ kIsolateStackGuardOffset + kStackGuardSize;
+ static const int kBuiltinTier0TableOffset =
+ kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
+ static const int kIsolateEmbedderDataOffset =
+ kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
static const int kIsolateFastCCallCallerFpOffset =
- kNumIsolateDataSlots * kApiSystemPointerSize;
+ kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kIsolateFastCCallCallerPcOffset =
kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
static const int kIsolateFastApiCallTargetOffset =
kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
- static const int kIsolateCageBaseOffset =
- kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
static const int kIsolateLongTaskStatsCounterOffset =
- kIsolateCageBaseOffset + kApiSystemPointerSize;
- static const int kIsolateStackGuardOffset =
- kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+ kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
static const int kIsolateRootsOffset =
- kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
+ kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
static const int kExternalPointerTableBufferOffset = 0;
static const int kExternalPointerTableLengthOffset =
@@ -482,6 +486,59 @@ class Internals {
#endif // V8_COMPRESS_POINTERS
};
+constexpr bool VirtualMemoryCageIsEnabled() {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// Size of the virtual memory cage, excluding the guard regions surrounding it.
+constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
+
+static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
+ "The virtual memory cage must be larger than the pointer "
+ "compression cage contained within it.");
+
+// Required alignment of the virtual memory cage. For simplicity, we require the
+// size of the guard regions to be a multiple of this, so that this specifies
+// the alignment of the cage including and excluding surrounding guard regions.
+// The alignment requirement is due to the pointer compression cage being
+// located at the start of the virtual memory cage.
+constexpr size_t kVirtualMemoryCageAlignment =
+ Internals::kPtrComprCageBaseAlignment;
+
+// Size of the guard regions surrounding the virtual memory cage. This assumes a
+// worst-case scenario of a 32-bit unsigned index being used to access an array
+// of 64-bit values.
+constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
+
+static_assert((kVirtualMemoryCageGuardRegionSize %
+ kVirtualMemoryCageAlignment) == 0,
+ "The size of the virtual memory cage guard region must be a "
+ "multiple of its required alignment.");
+
+// Minimum size of the virtual memory cage, excluding the guard regions
+// surrounding it. If the cage reservation fails, its size is currently halved
+// until either the reservation succeeds or the minimum size is reached. A
+// minimum of 32GB allows the 4GB pointer compression region as well as the
+// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
+constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB
+
+// For now, even if the virtual memory cage is enabled, we still allow backing
+ * stores to be allocated outside of it as a fallback. This will simplify the
+// initial rollout. However, if the heap sandbox is also enabled, we already use
+// the "enforcing mode" of the virtual memory cage. This is useful for testing.
+#ifdef V8_HEAP_SANDBOX
+constexpr bool kAllowBackingStoresOutsideCage = false;
+#else
+constexpr bool kAllowBackingStoresOutsideCage = true;
+#endif // V8_HEAP_SANDBOX
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
+
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
template <bool PerformCheck>
@@ -512,14 +569,6 @@ class BackingStoreBase {};
} // namespace internal
-V8_EXPORT bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src,
- int32_t* dst,
- uint32_t max_length);
-
-V8_EXPORT bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src,
- double* dst,
- uint32_t max_length);
-
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_
diff --git a/chromium/v8/include/v8-isolate.h b/chromium/v8/include/v8-isolate.h
new file mode 100644
index 00000000000..39276b34a9d
--- /dev/null
+++ b/chromium/v8/include/v8-isolate.h
@@ -0,0 +1,1662 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ISOLATE_H_
+#define INCLUDE_V8_ISOLATE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "cppgc/common.h"
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-callbacks.h" // NOLINT(build/include_directory)
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-debug.h" // NOLINT(build/include_directory)
+#include "v8-embedder-heap.h" // NOLINT(build/include_directory)
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-statistics.h" // NOLINT(build/include_directory)
+#include "v8-unwinder.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class CppHeap;
+class HeapProfiler;
+class MicrotaskQueue;
+class StartupData;
+class ScriptOrModule;
+class SharedArrayBuffer;
+
+namespace internal {
+class MicrotaskQueue;
+class ThreadLocalTop;
+} // namespace internal
+
+namespace metrics {
+class Recorder;
+} // namespace metrics
+
+/**
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
+ *
+ * The arguments of the *_in_bytes setters below specify limits in bytes.
+ */
+class V8_EXPORT ResourceConstraints {
+ public:
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * provided heap size limit. The heap size includes both the young and
+ * the old generation.
+ *
+ * \param initial_heap_size_in_bytes The initial heap size or zero.
+ * By default V8 starts with a small heap and dynamically grows it to
+ * match the set of live objects. This may lead to ineffective
+ * garbage collections at startup if the live set is large.
+ * Setting the initial heap size avoids such garbage collections.
+ * Note that this does not affect young generation garbage collections.
+ *
+ * \param maximum_heap_size_in_bytes The hard limit for the heap size.
+ * When the heap size approaches this limit, V8 will perform a series of
+ * garbage collections and invoke the NearHeapLimitCallback. If the garbage
+ * collections do not help and the callback does not increase the limit,
+ * then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
+ size_t maximum_heap_size_in_bytes);
+
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * capabilities of the current device the VM is running on.
+ *
+ * \param physical_memory The total amount of physical memory on the current
+ * device, in bytes.
+ * \param virtual_memory_limit The amount of virtual memory on the current
+ * device, in bytes, or zero, if there is no limit.
+ */
+ void ConfigureDefaults(uint64_t physical_memory,
+ uint64_t virtual_memory_limit);
+
+ /**
+ * The address beyond which the VM's stack may not grow.
+ */
+ uint32_t* stack_limit() const { return stack_limit_; }
+ void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+
+ /**
+ * The amount of virtual memory reserved for generated code. This is relevant
+ * for 64-bit architectures that rely on code range for calls in code.
+ *
+ * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
+ * process-wide code range that is lazily initialized. This value is used to
+ * configure that shared code range when the first Isolate is
+ * created. Subsequent Isolates ignore this value.
+ */
+ size_t code_range_size_in_bytes() const { return code_range_size_; }
+ void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
+
+ /**
+ * The maximum size of the old generation.
+ * When the old generation approaches this limit, V8 will perform a series of
+ * garbage collections and invoke the NearHeapLimitCallback.
+ * If the garbage collections do not help and the callback does not
+ * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ size_t max_old_generation_size_in_bytes() const {
+ return max_old_generation_size_;
+ }
+ void set_max_old_generation_size_in_bytes(size_t limit) {
+ max_old_generation_size_ = limit;
+ }
+
+ /**
+ * The maximum size of the young generation, which consists of two semi-spaces
+ * and a large object space. This affects the frequency of Scavenge garbage
+ * collections and should typically be much smaller than the old generation.
+ */
+ size_t max_young_generation_size_in_bytes() const {
+ return max_young_generation_size_;
+ }
+ void set_max_young_generation_size_in_bytes(size_t limit) {
+ max_young_generation_size_ = limit;
+ }
+
+ size_t initial_old_generation_size_in_bytes() const {
+ return initial_old_generation_size_;
+ }
+ void set_initial_old_generation_size_in_bytes(size_t initial_size) {
+ initial_old_generation_size_ = initial_size;
+ }
+
+ size_t initial_young_generation_size_in_bytes() const {
+ return initial_young_generation_size_;
+ }
+ void set_initial_young_generation_size_in_bytes(size_t initial_size) {
+ initial_young_generation_size_ = initial_size;
+ }
+
+ private:
+ static constexpr size_t kMB = 1048576u;
+ size_t code_range_size_ = 0;
+ size_t max_old_generation_size_ = 0;
+ size_t max_young_generation_size_ = 0;
+ size_t initial_old_generation_size_ = 0;
+ size_t initial_young_generation_size_ = 0;
+ uint32_t* stack_limit_ = nullptr;
+};
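
A small sketch of the configuration helpers above (not part of the patch):

// Cap the heap at roughly 512 MB and let V8 derive the generation sizes.
v8::ResourceConstraints constraints;
constraints.ConfigureDefaultsFromHeapSize(
    /*initial_heap_size_in_bytes=*/0,
    /*maximum_heap_size_in_bytes=*/512 * 1024 * 1024);
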
+
+/**
+ * Option flags passed to the SetRAILMode function.
+ * See documentation https://developers.google.com/web/tools/chrome-devtools/
+ * profile/evaluate-performance/rail
+ */
+enum RAILMode : unsigned {
+ // Response performance mode: In this mode very low virtual machine latency
+ // is provided. V8 will try to avoid JavaScript execution interruptions.
+ // Throughput may be throttled.
+ PERFORMANCE_RESPONSE,
+ // Animation performance mode: In this mode low virtual machine latency is
+ // provided. V8 will try to avoid as many JavaScript execution interruptions
+ // as possible. Throughput may be throttled. This is the default mode.
+ PERFORMANCE_ANIMATION,
+ // Idle performance mode: The embedder is idle. V8 can complete deferred work
+ // in this mode.
+ PERFORMANCE_IDLE,
+ // Load performance mode: In this mode high throughput is provided. V8 may
+ // turn off latency optimizations.
+ PERFORMANCE_LOAD
+};
+
+/**
+ * Memory pressure level for the MemoryPressureNotification.
+ * kNone hints V8 that there is no memory pressure.
+ * kModerate hints V8 to speed up incremental garbage collection at the cost of
+ * higher latency due to garbage collection pauses.
+ * kCritical hints V8 to free memory as soon as possible. Garbage collection
+ * pauses at this level will be large.
+ */
+enum class MemoryPressureLevel { kNone, kModerate, kCritical };
+
+/**
+ * Isolate represents an isolated instance of the V8 engine. V8 isolates have
+ * completely separate states. Objects from one isolate must not be used in
+ * other isolates. The embedder can create multiple isolates and use them in
+ * parallel in multiple threads. An isolate can be entered by at most one
+ * thread at any given time. The Locker/Unlocker API must be used to
+ * synchronize.
+ */
+class V8_EXPORT Isolate {
+ public:
+ /**
+ * Initial configuration parameters for a new Isolate.
+ */
+ struct V8_EXPORT CreateParams {
+ CreateParams();
+ ~CreateParams();
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ */
+ JitCodeEventHandler code_event_handler = nullptr;
+
+ /**
+ * ResourceConstraints to use for the new Isolate.
+ */
+ ResourceConstraints constraints;
+
+ /**
+ * Explicitly specify a startup snapshot blob. The embedder owns the blob.
+ */
+ StartupData* snapshot_blob = nullptr;
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ CounterLookupCallback counter_lookup_callback = nullptr;
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * histograms. The CreateHistogram function returns a
+ * histogram which will later be passed to the AddHistogramSample
+ * function.
+ */
+ CreateHistogramCallback create_histogram_callback = nullptr;
+ AddHistogramSampleCallback add_histogram_sample_callback = nullptr;
+
+ /**
+ * The ArrayBuffer::Allocator to use for allocating and freeing the backing
+ * store of ArrayBuffers.
+ *
+ * If the shared_ptr version is used, the Isolate instance and every
+ * |BackingStore| allocated using this allocator hold a std::shared_ptr
+ * to the allocator, in order to facilitate lifetime
+ * management for the allocator instance.
+ */
+ ArrayBuffer::Allocator* array_buffer_allocator = nullptr;
+ std::shared_ptr<ArrayBuffer::Allocator> array_buffer_allocator_shared;
+
+ /**
+ * Specifies an optional nullptr-terminated array of raw addresses in the
+ * embedder that V8 can match against during serialization and use for
+ * deserialization. This array and its content must stay valid for the
+ * entire lifetime of the isolate.
+ */
+ const intptr_t* external_references = nullptr;
+
+ /**
+ * Whether calling Atomics.wait (a function that may block) is allowed in
+ * this isolate. This can also be configured via SetAllowAtomicsWait.
+ */
+ bool allow_atomics_wait = true;
+
+ /**
+ * Termination is postponed when there is no active SafeForTerminationScope.
+ */
+ bool only_terminate_in_safe_scope = false;
+
+ /**
+ * The following parameters describe the offsets for addressing type info
+ * for wrapped API objects and are used by the fast C API
+ * (for details see v8-fast-api-calls.h).
+ */
+ int embedder_wrapper_type_index = -1;
+ int embedder_wrapper_object_index = -1;
+ };
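
A sketch of creating and disposing an isolate with these parameters (not part of the patch; the constraints object from the earlier sketch could also be assigned to params.constraints):

v8::Isolate::CreateParams params;
params.array_buffer_allocator_shared.reset(
    v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate* isolate = v8::Isolate::New(params);
{
  v8::Isolate::Scope isolate_scope(isolate);
  v8::HandleScope handle_scope(isolate);
  // ... create a Context and run scripts here ...
}
isolate->Dispose();
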
+
+ /**
+ * Stack-allocated class which sets the isolate for all operations
+ * executed within a local scope.
+ */
+ class V8_EXPORT V8_NODISCARD Scope {
+ public:
+ explicit Scope(Isolate* isolate) : isolate_(isolate) { isolate->Enter(); }
+
+ ~Scope() { isolate_->Exit(); }
+
+ // Prevent copying of Scope objects.
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
+ private:
+ Isolate* const isolate_;
+ };
+
+ /**
+ * Assert that no Javascript code is invoked.
+ */
+ class V8_EXPORT V8_NODISCARD DisallowJavascriptExecutionScope {
+ public:
+ enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE, DUMP_ON_FAILURE };
+
+ DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
+ ~DisallowJavascriptExecutionScope();
+
+ // Prevent copying of Scope objects.
+ DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&) =
+ delete;
+ DisallowJavascriptExecutionScope& operator=(
+ const DisallowJavascriptExecutionScope&) = delete;
+
+ private:
+ OnFailure on_failure_;
+ Isolate* isolate_;
+
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
+ };
+
+ /**
+ * Introduce exception to DisallowJavascriptExecutionScope.
+ */
+ class V8_EXPORT V8_NODISCARD AllowJavascriptExecutionScope {
+ public:
+ explicit AllowJavascriptExecutionScope(Isolate* isolate);
+ ~AllowJavascriptExecutionScope();
+
+ // Prevent copying of Scope objects.
+ AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&) =
+ delete;
+ AllowJavascriptExecutionScope& operator=(
+ const AllowJavascriptExecutionScope&) = delete;
+
+ private:
+ Isolate* isolate_;
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
+ };
+
+ /**
+ * Do not run microtasks while this scope is active, even if microtasks are
+ * automatically executed otherwise.
+ */
+ class V8_EXPORT V8_NODISCARD SuppressMicrotaskExecutionScope {
+ public:
+ explicit SuppressMicrotaskExecutionScope(
+ Isolate* isolate, MicrotaskQueue* microtask_queue = nullptr);
+ ~SuppressMicrotaskExecutionScope();
+
+ // Prevent copying of Scope objects.
+ SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&) =
+ delete;
+ SuppressMicrotaskExecutionScope& operator=(
+ const SuppressMicrotaskExecutionScope&) = delete;
+
+ private:
+ internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
+ internal::Address previous_stack_height_;
+
+ friend class internal::ThreadLocalTop;
+ };
+
+ /**
+ * This scope allows terminations inside direct V8 API calls and forbids them
+ * inside any recursive API calls without explicit SafeForTerminationScope.
+ */
+ class V8_EXPORT V8_NODISCARD SafeForTerminationScope {
+ public:
+ explicit SafeForTerminationScope(v8::Isolate* isolate);
+ ~SafeForTerminationScope();
+
+ // Prevent copying of Scope objects.
+ SafeForTerminationScope(const SafeForTerminationScope&) = delete;
+ SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete;
+
+ private:
+ internal::Isolate* isolate_;
+ bool prev_value_;
+ };
+
+ /**
+ * Types of garbage collections that can be requested via
+ * RequestGarbageCollectionForTesting.
+ */
+ enum GarbageCollectionType {
+ kFullGarbageCollection,
+ kMinorGarbageCollection
+ };
+
+ /**
+ * Features reported via the SetUseCounterCallback callback. Do not change
+ * assigned numbers of existing items; add new features to the end of this
+ * list.
+ */
+ enum UseCounterFeature {
+ kUseAsm = 0,
+ kBreakIterator = 1,
+ kLegacyConst = 2,
+ kMarkDequeOverflow = 3,
+ kStoreBufferOverflow = 4,
+ kSlotsBufferOverflow = 5,
+ kObjectObserve = 6,
+ kForcedGC = 7,
+ kSloppyMode = 8,
+ kStrictMode = 9,
+ kStrongMode = 10,
+ kRegExpPrototypeStickyGetter = 11,
+ kRegExpPrototypeToString = 12,
+ kRegExpPrototypeUnicodeGetter = 13,
+ kIntlV8Parse = 14,
+ kIntlPattern = 15,
+ kIntlResolved = 16,
+ kPromiseChain = 17,
+ kPromiseAccept = 18,
+ kPromiseDefer = 19,
+ kHtmlCommentInExternalScript = 20,
+ kHtmlComment = 21,
+ kSloppyModeBlockScopedFunctionRedefinition = 22,
+ kForInInitializer = 23,
+ kArrayProtectorDirtied = 24,
+ kArraySpeciesModified = 25,
+ kArrayPrototypeConstructorModified = 26,
+ kArrayInstanceProtoModified = 27,
+ kArrayInstanceConstructorModified = 28,
+ kLegacyFunctionDeclaration = 29,
+ kRegExpPrototypeSourceGetter = 30, // Unused.
+ kRegExpPrototypeOldFlagGetter = 31, // Unused.
+ kDecimalWithLeadingZeroInStrictMode = 32,
+ kLegacyDateParser = 33,
+ kDefineGetterOrSetterWouldThrow = 34,
+ kFunctionConstructorReturnedUndefined = 35,
+ kAssigmentExpressionLHSIsCallInSloppy = 36,
+ kAssigmentExpressionLHSIsCallInStrict = 37,
+ kPromiseConstructorReturnedUndefined = 38,
+ kConstructorNonUndefinedPrimitiveReturn = 39,
+ kLabeledExpressionStatement = 40,
+ kLineOrParagraphSeparatorAsLineTerminator = 41,
+ kIndexAccessor = 42,
+ kErrorCaptureStackTrace = 43,
+ kErrorPrepareStackTrace = 44,
+ kErrorStackTraceLimit = 45,
+ kWebAssemblyInstantiation = 46,
+ kDeoptimizerDisableSpeculation = 47,
+ kArrayPrototypeSortJSArrayModifiedPrototype = 48,
+ kFunctionTokenOffsetTooLongForToString = 49,
+ kWasmSharedMemory = 50,
+ kWasmThreadOpcodes = 51,
+ kAtomicsNotify = 52, // Unused.
+ kAtomicsWake = 53, // Unused.
+ kCollator = 54,
+ kNumberFormat = 55,
+ kDateTimeFormat = 56,
+ kPluralRules = 57,
+ kRelativeTimeFormat = 58,
+ kLocale = 59,
+ kListFormat = 60,
+ kSegmenter = 61,
+ kStringLocaleCompare = 62,
+ kStringToLocaleUpperCase = 63,
+ kStringToLocaleLowerCase = 64,
+ kNumberToLocaleString = 65,
+ kDateToLocaleString = 66,
+ kDateToLocaleDateString = 67,
+ kDateToLocaleTimeString = 68,
+ kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
+ kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
+ kOptimizedFunctionWithOneShotBytecode = 71, // Unused.
+ kRegExpMatchIsTrueishOnNonJSRegExp = 72,
+ kRegExpMatchIsFalseishOnJSRegExp = 73,
+ kDateGetTimezoneOffset = 74, // Unused.
+ kStringNormalize = 75,
+ kCallSiteAPIGetFunctionSloppyCall = 76,
+ kCallSiteAPIGetThisSloppyCall = 77,
+ kRegExpMatchAllWithNonGlobalRegExp = 78,
+ kRegExpExecCalledOnSlowRegExp = 79,
+ kRegExpReplaceCalledOnSlowRegExp = 80,
+ kDisplayNames = 81,
+ kSharedArrayBufferConstructed = 82,
+ kArrayPrototypeHasElements = 83,
+ kObjectPrototypeHasElements = 84,
+ kNumberFormatStyleUnit = 85,
+ kDateTimeFormatRange = 86,
+ kDateTimeFormatDateTimeStyle = 87,
+ kBreakIteratorTypeWord = 88,
+ kBreakIteratorTypeLine = 89,
+ kInvalidatedArrayBufferDetachingProtector = 90,
+ kInvalidatedArrayConstructorProtector = 91,
+ kInvalidatedArrayIteratorLookupChainProtector = 92,
+ kInvalidatedArraySpeciesLookupChainProtector = 93,
+ kInvalidatedIsConcatSpreadableLookupChainProtector = 94,
+ kInvalidatedMapIteratorLookupChainProtector = 95,
+ kInvalidatedNoElementsProtector = 96,
+ kInvalidatedPromiseHookProtector = 97,
+ kInvalidatedPromiseResolveLookupChainProtector = 98,
+ kInvalidatedPromiseSpeciesLookupChainProtector = 99,
+ kInvalidatedPromiseThenLookupChainProtector = 100,
+ kInvalidatedRegExpSpeciesLookupChainProtector = 101,
+ kInvalidatedSetIteratorLookupChainProtector = 102,
+ kInvalidatedStringIteratorLookupChainProtector = 103,
+ kInvalidatedStringLengthOverflowLookupChainProtector = 104,
+ kInvalidatedTypedArraySpeciesLookupChainProtector = 105,
+ kWasmSimdOpcodes = 106,
+ kVarRedeclaredCatchBinding = 107,
+ kWasmRefTypes = 108,
+ kWasmBulkMemory = 109, // Unused.
+ kWasmMultiValue = 110,
+ kWasmExceptionHandling = 111,
+ kInvalidatedMegaDOMProtector = 112,
+
+ // If you add new values here, you'll also need to update Chromium's:
+ // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
+ // this list need to be landed first, then changes on the Chromium side.
+ kUseCounterFeatureCount // This enum value must be last.
+ };
+
+ enum MessageErrorLevel {
+ kMessageLog = (1 << 0),
+ kMessageDebug = (1 << 1),
+ kMessageInfo = (1 << 2),
+ kMessageError = (1 << 3),
+ kMessageWarning = (1 << 4),
+ kMessageAll = kMessageLog | kMessageDebug | kMessageInfo | kMessageError |
+ kMessageWarning,
+ };
+
+ using UseCounterCallback = void (*)(Isolate* isolate,
+ UseCounterFeature feature);
+
+ /**
+ * Allocates a new isolate but does not initialize it. Does not change the
+ * currently entered isolate.
+ *
+ * Only Isolate::GetData() and Isolate::SetData(), which access the
+ * embedder-controlled parts of the isolate, are allowed to be called on the
+ * uninitialized isolate. To initialize the isolate, call
+ * Isolate::Initialize().
+ *
+ * When an isolate is no longer used its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ *
+ * V8::Initialize() must have run prior to this.
+ */
+ static Isolate* Allocate();
+
+ /**
+ * Initialize an Isolate previously allocated by Isolate::Allocate().
+ */
+ static void Initialize(Isolate* isolate, const CreateParams& params);
+
+ /**
+ * Creates a new isolate. Does not change the currently entered
+ * isolate.
+ *
+ * When an isolate is no longer used its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ *
+ * V8::Initialize() must have run prior to this.
+ */
+ static Isolate* New(const CreateParams& params);
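+
+ /**
+ * Editorial usage sketch (not part of the upstream header): creating and
+ * disposing an isolate, assuming the platform has already been set up via
+ * V8::InitializePlatform() and V8::Initialize().
+ * \code
+ *   v8::Isolate::CreateParams create_params;
+ *   create_params.array_buffer_allocator =
+ *       v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ *   v8::Isolate* isolate = v8::Isolate::New(create_params);
+ *   {
+ *     v8::Isolate::Scope isolate_scope(isolate);
+ *     v8::HandleScope handle_scope(isolate);
+ *     // ... create a Context and run scripts ...
+ *   }
+ *   isolate->Dispose();
+ *   delete create_params.array_buffer_allocator;
+ * \endcode
+ */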
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ *
+ * This method must not be invoked before V8::Initialize() was invoked.
+ */
+ static Isolate* GetCurrent();
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ *
+ * No checks are performed by this method.
+ */
+ static Isolate* TryGetCurrent();
+
+ /**
+ * Clears the set of objects held strongly by the heap. This set of
+ * objects is originally built when a WeakRef is created or
+ * successfully dereferenced.
+ *
+ * This is invoked automatically after microtasks are run. See
+ * MicrotasksPolicy for when microtasks are run.
+ *
+ * This needs to be manually invoked only if the embedder is manually running
+ * microtasks via a custom MicrotaskQueue class's PerformCheckpoint. In that
+ * case, it is the embedder's responsibility to make this call at a time which
+ * does not interrupt synchronous ECMAScript code execution.
+ */
+ void ClearKeptObjects();
+
+ /**
+ * Custom callback used by embedders to help V8 determine if it should abort
+ * when it throws and no internal handler is predicted to catch the
+ * exception. If --abort-on-uncaught-exception is used on the command line,
+ * then V8 will abort if either:
+ * - no custom callback is set.
+ * - the custom callback set returns true.
+ * Otherwise, the custom callback will not be called and V8 will not abort.
+ */
+ using AbortOnUncaughtExceptionCallback = bool (*)(Isolate*);
+ void SetAbortOnUncaughtExceptionCallback(
+ AbortOnUncaughtExceptionCallback callback);
+
+ /**
+ * This specifies the callback called by the upcoming dynamic
+ * import() language feature to load modules.
+ */
+ void SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback);
+
+ /**
+ * This specifies the callback called by the upcoming import.meta
+ * language feature to retrieve host-defined meta data for a module.
+ */
+ void SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback);
+
+ /**
+ * This specifies the callback called when the stack property of Error
+ * is accessed.
+ */
+ void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
+
+ /**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to guide heuristics.
+ * It is allowed to call this function from another thread while
+ * the isolate is executing long running JavaScript code.
+ */
+ void MemoryPressureNotification(MemoryPressureLevel level);
+
+ /**
+ * Drop non-essential caches. Should only be called from testing code.
+ * The method can potentially block for a long time and does not necessarily
+ * trigger GC.
+ */
+ void ClearCachesForTesting();
+
+ /**
+ * Methods below this point require holding a lock (using Locker) in
+ * a multi-threaded environment.
+ */
+
+ /**
+ * Sets this isolate as the entered one for the current thread.
+ * Saves the previously entered one (if any), so that it can be
+ * restored when exiting. Re-entering an isolate is allowed.
+ */
+ void Enter();
+
+ /**
+ * Exits this isolate by restoring the previously entered one in the
+ * current thread. The isolate may still stay the same, if it was
+ * entered more than once.
+ *
+ * Requires: this == Isolate::GetCurrent().
+ */
+ void Exit();
+
+ /**
+ * Disposes the isolate. The isolate must not be entered by any
+ * thread to be disposable.
+ */
+ void Dispose();
+
+ /**
+ * Dumps activated low-level V8 internal stats. This can be used instead
+ * of performing a full isolate disposal.
+ */
+ void DumpAndResetStats();
+
+ /**
+ * Discards all V8 thread-specific data for the Isolate. Should be used
+ * if a thread is terminating and it has used an Isolate that will outlive
+ * the thread -- all thread-specific data for an Isolate is discarded when
+ * an Isolate is disposed so this call is pointless if an Isolate is about
+ * to be Disposed.
+ */
+ void DiscardThreadSpecificMetadata();
+
+ /**
+ * Associate embedder-specific data with the isolate. |slot| has to be
+ * between 0 and GetNumberOfDataSlots() - 1.
+ */
+ V8_INLINE void SetData(uint32_t slot, void* data);
+
+ /**
+ * Retrieve embedder-specific data from the isolate.
+ * Returns NULL if SetData has never been called for the given |slot|.
+ */
+ V8_INLINE void* GetData(uint32_t slot);
+
+ /**
+ * Returns the maximum number of available embedder data slots. Valid slots
+ * are in the range from 0 to GetNumberOfDataSlots() - 1.
+ */
+ V8_INLINE static uint32_t GetNumberOfDataSlots();
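+
+ /**
+ * Editorial usage sketch: storing and retrieving embedder state in a data
+ * slot. |EmbedderState| and |state| are hypothetical embedder-side names.
+ * \code
+ *   uint32_t slot = 0;  // must be < Isolate::GetNumberOfDataSlots()
+ *   isolate->SetData(slot, state);
+ *   auto* restored = static_cast<EmbedderState*>(isolate->GetData(slot));
+ * \endcode
+ */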
+
+ /**
+ * Returns data that was previously attached to the isolate snapshot via
+ * SnapshotCreator, and removes the reference to it.
+ * A repeated call with the same index returns an empty MaybeLocal.
+ */
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
+
+ /**
+ * Get statistics about the heap memory usage.
+ */
+ void GetHeapStatistics(HeapStatistics* heap_statistics);
+
+ /**
+ * Returns the number of spaces in the heap.
+ */
+ size_t NumberOfHeapSpaces();
+
+ /**
+ * Get the memory usage of a space in the heap.
+ *
+ * \param space_statistics The HeapSpaceStatistics object to fill in
+ * statistics.
+ * \param index The index of the space to get statistics from, which ranges
+ * from 0 to NumberOfHeapSpaces() - 1.
+ * \returns true on success.
+ */
+ bool GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
+ size_t index);
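+
+ /**
+ * Editorial usage sketch: iterating over all heap spaces. The accessors on
+ * HeapSpaceStatistics used in the comment (space_name(), space_used_size())
+ * are part of the public statistics API.
+ * \code
+ *   for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
+ *     v8::HeapSpaceStatistics stats;
+ *     if (isolate->GetHeapSpaceStatistics(&stats, i)) {
+ *       // e.g. log stats.space_name() and stats.space_used_size()
+ *     }
+ *   }
+ * \endcode
+ */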
+
+ /**
+ * Returns the number of types of objects tracked in the heap at GC.
+ */
+ size_t NumberOfTrackedHeapObjectTypes();
+
+ /**
+ * Get statistics about objects in the heap.
+ *
+ * \param object_statistics The HeapObjectStatistics object to fill in
+ * statistics of objects of given type, which were live in the previous GC.
+ * \param type_index The index of the type of object to fill details about,
+ * which ranges from 0 to NumberOfTrackedHeapObjectTypes() - 1.
+ * \returns true on success.
+ */
+ bool GetHeapObjectStatisticsAtLastGC(HeapObjectStatistics* object_statistics,
+ size_t type_index);
+
+ /**
+ * Get statistics about code and its metadata in the heap.
+ *
+ * \param object_statistics The HeapCodeStatistics object to fill in
+ * statistics of code, bytecode and their metadata.
+ * \returns true on success.
+ */
+ bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
+
+ /**
+ * This API is experimental and may change significantly.
+ *
+ * Enqueues a memory measurement request and invokes the delegate with the
+ * results.
+ *
+ * \param delegate the delegate that defines which contexts to measure and
+ * reports the results.
+ *
+ * \param execution promptness of executing the memory measurement.
+ * The kEager value is expected to be used only in tests.
+ */
+ bool MeasureMemory(
+ std::unique_ptr<MeasureMemoryDelegate> delegate,
+ MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault);
+
+ /**
+ * Get a call stack sample from the isolate.
+ * \param state Execution state.
+ * \param frames Caller allocated buffer to store stack frames.
+ * \param frames_limit Maximum number of frames to capture. The buffer must
+ * be large enough to hold the number of frames.
+ * \param sample_info The sample info is filled in by the function and
+ * provides the number of actually captured stack frames and
+ * the current VM state.
+ * \note GetStackSample should only be called when the JS thread is paused or
+ * interrupted. Otherwise the behavior is undefined.
+ */
+ void GetStackSample(const RegisterState& state, void** frames,
+ size_t frames_limit, SampleInfo* sample_info);
+
+ /**
+ * Adjusts the amount of registered external memory. Used to give V8 an
+ * indication of the amount of externally allocated memory that is kept alive
+ * by JavaScript objects. V8 uses this to decide when to perform global
+ * garbage collections. Registering externally allocated memory will trigger
+ * global garbage collections more often than it would otherwise in an attempt
+ * to garbage collect the JavaScript objects that keep the externally
+ * allocated memory alive.
+ *
+ * \param change_in_bytes the change in externally allocated memory that is
+ * kept alive by JavaScript objects.
+ * \returns the adjusted value.
+ */
+ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
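+
+ /**
+ * Editorial usage sketch: reporting a 1 MiB external backing store that is
+ * kept alive by a JavaScript wrapper object, and releasing it later.
+ * \code
+ *   constexpr int64_t kSize = 1024 * 1024;
+ *   isolate->AdjustAmountOfExternalAllocatedMemory(kSize);   // on allocation
+ *   // ...
+ *   isolate->AdjustAmountOfExternalAllocatedMemory(-kSize);  // on release
+ * \endcode
+ */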
+
+ /**
+ * Returns the number of phantom handles without callbacks that were reset
+ * by the garbage collector since the last call to this function.
+ */
+ size_t NumberOfPhantomHandleResetsSinceLastCall();
+
+ /**
+ * Returns heap profiler for this isolate. Will return NULL until the isolate
+ * is initialized.
+ */
+ HeapProfiler* GetHeapProfiler();
+
+ /**
+ * Tells the VM whether the embedder is idle or not.
+ */
+ void SetIdle(bool is_idle);
+
+ /** Returns the ArrayBuffer::Allocator used in this isolate. */
+ ArrayBuffer::Allocator* GetArrayBufferAllocator();
+
+ /** Returns true if this isolate has a current context. */
+ bool InContext();
+
+ /**
+ * Returns the context of the currently running JavaScript, or the context
+ * on the top of the stack if no JavaScript is running.
+ */
+ Local<Context> GetCurrentContext();
+
+ /**
+ * Returns either the last context entered through V8's C++ API, or the
+ * context of the currently running microtask while processing microtasks.
+ * If a context is entered while executing a microtask, that context is
+ * returned.
+ */
+ Local<Context> GetEnteredOrMicrotaskContext();
+
+ /**
+ * Returns the Context that corresponds to the Incumbent realm in HTML spec.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#incumbent
+ */
+ Local<Context> GetIncumbentContext();
+
+ /**
+ * Schedules a v8::Exception::Error with the given message.
+ * See ThrowException for more details. Templatized to provide compile-time
+ * errors in case of too long strings (see v8::String::NewFromUtf8Literal).
+ */
+ template <int N>
+ Local<Value> ThrowError(const char (&message)[N]) {
+ return ThrowError(String::NewFromUtf8Literal(this, message));
+ }
+ Local<Value> ThrowError(Local<String> message);
+
+ /**
+ * Schedules an exception to be thrown when returning to JavaScript. When an
+ * exception has been scheduled it is illegal to invoke any JavaScript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke JavaScript operations.
+ */
+ Local<Value> ThrowException(Local<Value> exception);
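+
+ /**
+ * Editorial usage sketch: throwing from inside a FunctionCallback, where
+ * |info| is the v8::FunctionCallbackInfo passed to the callback.
+ * \code
+ *   if (info.Length() < 1) {
+ *     info.GetIsolate()->ThrowError("expected at least one argument");
+ *     return;
+ *   }
+ * \endcode
+ */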
+
+ using GCCallback = void (*)(Isolate* isolate, GCType type,
+ GCCallbackFlags flags);
+ using GCCallbackWithData = void (*)(Isolate* isolate, GCType type,
+ GCCallbackFlags flags, void* data);
+
+ /**
+ * Enables the host application to receive a notification before a
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it triggers
+ * a garbage collection, the callback won't be called again.
+ * It is possible to specify a GCType filter for your callback, but it is
+ * not possible to register the same callback function twice with
+ * different GCType filters.
+ */
+ void AddGCPrologueCallback(GCCallbackWithData callback, void* data = nullptr,
+ GCType gc_type_filter = kGCTypeAll);
+ void AddGCPrologueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
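+
+ /**
+ * Editorial usage sketch: registering a prologue callback that only fires
+ * for full (mark-sweep-compact) collections. |OnGCPrologue| is a
+ * hypothetical embedder function.
+ * \code
+ *   static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
+ *                            v8::GCCallbackFlags flags) {
+ *     // e.g. record the start time of the collection
+ *   }
+ *   isolate->AddGCPrologueCallback(OnGCPrologue,
+ *                                  v8::kGCTypeMarkSweepCompact);
+ * \endcode
+ */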
+
+ /**
+ * This function removes a callback that was installed by the
+ * AddGCPrologueCallback function.
+ */
+ void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr);
+ void RemoveGCPrologueCallback(GCCallback callback);
+
+ /**
+ * Sets the embedder heap tracer for the isolate.
+ */
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ /**
+ * Gets the currently active heap tracer for the isolate.
+ */
+ EmbedderHeapTracer* GetEmbedderHeapTracer();
+
+ /**
+ * Sets an embedder roots handle that V8 should consider when performing
+ * non-unified heap garbage collections.
+ *
+ * Using only EmbedderHeapTracer automatically sets up a default handler.
+ * The intended use case is for setting a custom handler after invoking
+ * `AttachCppHeap()`.
+ *
+ * V8 does not take ownership of the handler.
+ */
+ void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
+
+ /**
+ * Attaches a managed C++ heap as an extension to the JavaScript heap. The
+ * embedder maintains ownership of the CppHeap. At most one C++ heap can be
+ * attached to V8.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void AttachCppHeap(CppHeap*);
+
+ /**
+ * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void DetachCppHeap();
+
+ /**
+ * This is an experimental feature and may still change significantly.
+ *
+ * \returns the C++ heap managed by V8. Only available if such a heap has been
+ * attached using `AttachCppHeap()`.
+ */
+ CppHeap* GetCppHeap() const;
+
+ /**
+ * Use for |AtomicsWaitCallback| to indicate the type of event it receives.
+ */
+ enum class AtomicsWaitEvent {
+ /** Indicates that this call is happening before waiting. */
+ kStartWait,
+ /** `Atomics.wait()` finished because of an `Atomics.wake()` call. */
+ kWokenUp,
+ /** `Atomics.wait()` finished because it timed out. */
+ kTimedOut,
+ /** `Atomics.wait()` was interrupted through |TerminateExecution()|. */
+ kTerminatedExecution,
+ /** `Atomics.wait()` was stopped through |AtomicsWaitWakeHandle|. */
+ kAPIStopped,
+ /** `Atomics.wait()` did not wait, as the initial condition was not met. */
+ kNotEqual
+ };
+
+ /**
+ * Passed to |AtomicsWaitCallback| as a means of stopping an ongoing
+ * `Atomics.wait` call.
+ */
+ class V8_EXPORT AtomicsWaitWakeHandle {
+ public:
+ /**
+ * Stop this `Atomics.wait()` call and call the |AtomicsWaitCallback|
+ * with |kAPIStopped|.
+ *
+ * This function may be called from another thread. The caller has to ensure
+ * through proper synchronization that it is not called after
+ * the finishing |AtomicsWaitCallback|.
+ *
+ * Note that the ECMAScript specification does not plan for the possibility
+ * of wakeups that are neither coming from a timeout nor an `Atomics.wake()`
+ * call, so this may invalidate assumptions made by existing code.
+ * The embedder may accordingly wish to schedule an exception in the
+ * finishing |AtomicsWaitCallback|.
+ */
+ void Wake();
+ };
+
+ /**
+ * Embedder callback for `Atomics.wait()` that can be added through
+ * |SetAtomicsWaitCallback|.
+ *
+ * This will be called just before starting to wait with the |event| value
+ * |kStartWait| and after finishing waiting with one of the other
+ * values of |AtomicsWaitEvent| inside of an `Atomics.wait()` call.
+ *
+ * |array_buffer| will refer to the underlying SharedArrayBuffer,
+ * |offset_in_bytes| to the location of the waited-on memory address inside
+ * the SharedArrayBuffer.
+ *
+ * |value| and |timeout_in_ms| will be the values passed to
+ * the `Atomics.wait()` call. If no timeout was used, |timeout_in_ms|
+ * will be `INFINITY`.
+ *
+ * In the |kStartWait| callback, |stop_handle| will be an object that
+ * is only valid until the corresponding finishing callback and that
+ * can be used to stop the wait process while it is happening.
+ *
+ * This callback may schedule exceptions, *unless* |event| is equal to
+ * |kTerminatedExecution|.
+ */
+ using AtomicsWaitCallback = void (*)(AtomicsWaitEvent event,
+ Local<SharedArrayBuffer> array_buffer,
+ size_t offset_in_bytes, int64_t value,
+ double timeout_in_ms,
+ AtomicsWaitWakeHandle* stop_handle,
+ void* data);
+
+ /**
+ * Set a new |AtomicsWaitCallback|. This overrides an earlier
+ * |AtomicsWaitCallback|, if there was any. If |callback| is nullptr,
+ * this unsets the callback. |data| will be passed to the callback
+ * as its last parameter.
+ */
+ void SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data);
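+
+ /**
+ * Editorial usage sketch: a callback that observes `Atomics.wait()` calls.
+ * |OnAtomicsWait| is a hypothetical embedder function.
+ * \code
+ *   static void OnAtomicsWait(v8::Isolate::AtomicsWaitEvent event,
+ *                             v8::Local<v8::SharedArrayBuffer> array_buffer,
+ *                             size_t offset_in_bytes, int64_t value,
+ *                             double timeout_in_ms,
+ *                             v8::Isolate::AtomicsWaitWakeHandle* stop_handle,
+ *                             void* data) {
+ *     if (event == v8::Isolate::AtomicsWaitEvent::kStartWait) {
+ *       // |stop_handle| stays valid until the matching finishing event.
+ *     }
+ *   }
+ *   isolate->SetAtomicsWaitCallback(OnAtomicsWait, nullptr);
+ * \endcode
+ */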
+
+ /**
+ * Enables the host application to receive a notification after a
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it triggers
+ * a garbage collection, the callback won't be called again.
+ * It is possible to specify a GCType filter for your callback, but it is
+ * not possible to register the same callback function twice with
+ * different GCType filters.
+ */
+ void AddGCEpilogueCallback(GCCallbackWithData callback, void* data = nullptr,
+ GCType gc_type_filter = kGCTypeAll);
+ void AddGCEpilogueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes a callback that was installed by the
+ * AddGCEpilogueCallback function.
+ */
+ void RemoveGCEpilogueCallback(GCCallbackWithData callback,
+ void* data = nullptr);
+ void RemoveGCEpilogueCallback(GCCallback callback);
+
+ using GetExternallyAllocatedMemoryInBytesCallback = size_t (*)();
+
+ /**
+ * Set the callback that tells V8 how much memory is currently allocated
+ * externally of the V8 heap. Ideally this memory is somehow connected to V8
+ * objects and may get freed up when the corresponding V8 objects get
+ * collected by a V8 garbage collection.
+ */
+ void SetGetExternallyAllocatedMemoryInBytesCallback(
+ GetExternallyAllocatedMemoryInBytesCallback callback);
+
+ /**
+ * Forcefully terminate the current thread of JavaScript execution
+ * in the given isolate.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void TerminateExecution();
+
+ /**
+ * Is V8 terminating JavaScript execution.
+ *
+ * Returns true if JavaScript execution is currently terminating
+ * because of a call to TerminateExecution. In that case there are
+ * still JavaScript frames on the stack and the termination
+ * exception is still active.
+ */
+ bool IsExecutionTerminating();
+
+ /**
+ * Resume execution capability in the given isolate, whose execution
+ * was previously forcefully terminated using TerminateExecution().
+ *
+ * When execution is forcefully terminated using TerminateExecution(),
+ * the isolate can not resume execution until all JavaScript frames
+ * have propagated the uncatchable exception which is generated. This
+ * method allows the program embedding the engine to handle the
+ * termination event and resume execution capability, even if
+ * JavaScript frames remain on the stack.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void CancelTerminateExecution();
+
+ /**
+ * Request V8 to interrupt long running JavaScript code and invoke
+ * the given |callback| passing the given |data| to it. After |callback|
+ * returns control will be returned to the JavaScript code.
+ * There may be a number of interrupt requests in flight.
+ * Can be called from another thread without acquiring a |Locker|.
+ * Registered |callback| must not reenter interrupted Isolate.
+ */
+ void RequestInterrupt(InterruptCallback callback, void* data);
+
+ /**
+ * Returns true if there is ongoing background work within V8 that will
+ * eventually post a foreground task, like asynchronous WebAssembly
+ * compilation.
+ */
+ bool HasPendingBackgroundTasks();
+
+ /**
+ * Request garbage collection in this Isolate. It is only valid to call this
+ * function if --expose_gc was specified.
+ *
+ * This should only be used for testing purposes and not to enforce a garbage
+ * collection schedule. It has strong negative impact on the garbage
+ * collection performance. Use IdleNotificationDeadline() or
+ * LowMemoryNotification() instead to influence the garbage collection
+ * schedule.
+ */
+ void RequestGarbageCollectionForTesting(GarbageCollectionType type);
+
+ /**
+ * Set the callback to invoke for logging event.
+ */
+ void SetEventLogger(LogEventCallback that);
+
+ /**
+ * Adds a callback to notify the host application right before a script
+ * is about to run. If a script re-enters the runtime during execution, the
+ * BeforeCallEnteredCallback is invoked for each re-entrance.
+ * Executing scripts inside the callback will re-trigger the callback.
+ */
+ void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
+ * Removes callback that was installed by AddBeforeCallEnteredCallback.
+ */
+ void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
+ * Adds a callback to notify the host application when a script finished
+ * running. If a script re-enters the runtime during execution, the
+ * CallCompletedCallback is only invoked when the outer-most script
+ * execution ends. Executing scripts inside the callback does not trigger
+ * further callbacks.
+ */
+ void AddCallCompletedCallback(CallCompletedCallback callback);
+
+ /**
+ * Removes callback that was installed by AddCallCompletedCallback.
+ */
+ void RemoveCallCompletedCallback(CallCompletedCallback callback);
+
+ /**
+ * Set the PromiseHook callback for various promise lifecycle
+ * events.
+ */
+ void SetPromiseHook(PromiseHook hook);
+
+ /**
+ * Set callback to notify about promise reject with no handler, or
+ * revocation of such a previous notification once the handler is added.
+ */
+ void SetPromiseRejectCallback(PromiseRejectCallback callback);
+
+ /**
+ * Runs the default MicrotaskQueue until it gets empty and performs other
+ * microtask checkpoint steps, such as calling ClearKeptObjects. Asserts that
+ * the MicrotasksPolicy is not kScoped. Any exceptions thrown by microtask
+ * callbacks are swallowed.
+ */
+ void PerformMicrotaskCheckpoint();
+
+ /**
+ * Enqueues the callback to the default MicrotaskQueue
+ */
+ void EnqueueMicrotask(Local<Function> microtask);
+
+ /**
+ * Enqueues the callback to the default MicrotaskQueue
+ */
+ void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
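+
+ /**
+ * Editorial usage sketch: enqueueing a C++ microtask and draining the
+ * default queue explicitly (with MicrotasksPolicy::kExplicit).
+ * |OnMicrotask| is a hypothetical embedder function.
+ * \code
+ *   static void OnMicrotask(void* data) {
+ *     // embedder work
+ *   }
+ *   isolate->EnqueueMicrotask(OnMicrotask, nullptr);
+ *   isolate->PerformMicrotaskCheckpoint();
+ * \endcode
+ */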
+
+ /**
+ * Controls how Microtasks are invoked. See MicrotasksPolicy for details.
+ */
+ void SetMicrotasksPolicy(MicrotasksPolicy policy);
+
+ /**
+ * Returns the policy controlling how Microtasks are invoked.
+ */
+ MicrotasksPolicy GetMicrotasksPolicy() const;
+
+ /**
+ * Adds a callback to notify the host application after
+ * microtasks were run on the default MicrotaskQueue. The callback is
+ * triggered by explicit RunMicrotasks call or automatic microtasks execution
+ * (see SetMicrotaskPolicy).
+ *
+ * The callback will trigger even if an attempt to run microtasks was made
+ * but the microtask queue was empty and no microtask was actually
+ * executed.
+ *
+ * Executing scripts inside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
+
+ /**
+ * Removes callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
+
+ /**
+ * Sets a callback for counting the number of times a feature of V8 is used.
+ */
+ void SetUseCounterCallback(UseCounterCallback callback);
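+
+ /**
+ * Editorial usage sketch: forwarding use-counter events to the embedder's
+ * metrics system. |OnUseCounter| is a hypothetical embedder function.
+ * \code
+ *   static void OnUseCounter(v8::Isolate* isolate,
+ *                            v8::Isolate::UseCounterFeature feature) {
+ *     // e.g. record |feature| in a histogram
+ *   }
+ *   isolate->SetUseCounterCallback(OnUseCounter);
+ * \endcode
+ */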
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ void SetCounterFunction(CounterLookupCallback);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * histograms. The CreateHistogram function returns a
+ * histogram which will later be passed to the AddHistogramSample
+ * function.
+ */
+ void SetCreateHistogramFunction(CreateHistogramCallback);
+ void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * event based metrics. In order to use this interface
+ * include/v8-metrics.h
+ * needs to be included and the recorder needs to be derived from the
+ * Recorder base class defined there.
+ * This method can only be called once per isolate and must happen during
+ * isolate initialization before background threads are spawned.
+ */
+ void SetMetricsRecorder(
+ const std::shared_ptr<metrics::Recorder>& metrics_recorder);
+
+ /**
+ * Enables the host application to provide a mechanism for recording a
+ * predefined set of data as crash keys to be used in postmortem debugging in
+ * case of a crash.
+ */
+ void SetAddCrashKeyCallback(AddCrashKeyCallback);
+
+ /**
+ * Optional notification that the embedder is idle.
+ * V8 uses the notification to perform garbage collection.
+ * This call can be used repeatedly if the embedder remains idle.
+ * Returns true if the embedder should stop calling IdleNotificationDeadline
+ * until real work has been done. This indicates that V8 has done
+ * as much cleanup as it will be able to do.
+ *
+ * The deadline_in_seconds argument specifies the deadline V8 has to finish
+ * garbage collection work. deadline_in_seconds is compared with
+ * MonotonicallyIncreasingTime() and should be based on the same timebase as
+ * that function. There is no guarantee that the actual work will be done
+ * within the time limit.
+ */
+ bool IdleNotificationDeadline(double deadline_in_seconds);
+
+ /**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to attempt to free memory.
+ */
+ void LowMemoryNotification();
+
+ /**
+ * Optional notification that a context has been disposed. V8 uses these
+ * notifications to guide the GC heuristic and cancel FinalizationRegistry
+ * cleanup tasks. Returns the number of context disposals - including this one
+ * - since the last time V8 had a chance to clean up.
+ *
+ * The optional parameter |dependant_context| specifies whether the disposed
+ * context was depending on state from other contexts or not.
+ */
+ int ContextDisposedNotification(bool dependant_context = true);
+
+ /**
+ * Optional notification that the isolate switched to the foreground.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInForegroundNotification();
+
+ /**
+ * Optional notification that the isolate switched to the background.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInBackgroundNotification();
+
+ /**
+ * Optional notification which will enable the memory savings mode.
+ * V8 uses this notification to guide heuristics which may result in a
+ * smaller memory footprint at the cost of reduced runtime performance.
+ */
+ void EnableMemorySavingsMode();
+
+ /**
+ * Optional notification which will disable the memory savings mode.
+ */
+ void DisableMemorySavingsMode();
+
+ /**
+ * Optional notification to tell V8 the current performance requirements
+ * of the embedder based on RAIL.
+ * V8 uses these notifications to guide heuristics.
+ * This is an unfinished experimental feature. Semantics and implementation
+ * may change frequently.
+ */
+ void SetRAILMode(RAILMode rail_mode);
+
+ /**
+ * Update load start time of the RAIL mode
+ */
+ void UpdateLoadStartTime();
+
+ /**
+ * Optional notification to tell V8 the current isolate is used for debugging
+ * and requires higher heap limit.
+ */
+ void IncreaseHeapLimitForDebugging();
+
+ /**
+ * Restores the original heap limit after IncreaseHeapLimitForDebugging().
+ */
+ void RestoreOriginalHeapLimit();
+
+ /**
+ * Returns true if the heap limit was increased for debugging and the
+ * original heap limit was not restored yet.
+ */
+ bool IsHeapLimitIncreasedForDebugging();
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ *
+ * \param options options for the JIT code event handler.
+ * \param event_handler the JIT code event handler, which will be invoked
+ * each time code is added, moved or removed.
+ * \note \p event_handler won't get notified of code that already exists.
+ * \note since code removal notifications are not currently issued, the
+ * \p event_handler may get notifications of code that overlaps earlier
+ * code notifications. This happens when code areas are reused, and the
+ * earlier overlapping code areas should therefore be discarded.
+ * \note the events passed to \p event_handler and the strings they point to
+ * are not guaranteed to live past each call. The \p event_handler must
+ * copy strings and other parameters it needs to keep around.
+ * \note the set of events declared in JitCodeEvent::EventType is expected to
+ * grow over time, and the JitCodeEvent structure is expected to accrue
+ * new members. The \p event_handler function must ignore event codes
+ * it does not recognize to maintain future compatibility.
+ * \note Use Isolate::CreateParams to get events for code executed during
+ * Isolate setup.
+ */
+ void SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler);
+
+ /**
+ * Modifies the stack limit for this Isolate.
+ *
+ * \param stack_limit An address beyond which the VM's stack may not grow.
+ *
+ * \note If you are using threads then you should hold the V8::Locker lock
+ * while setting the stack limit and you must set a non-default stack
+ * limit separately for each thread.
+ */
+ void SetStackLimit(uintptr_t stack_limit);
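+
+ /**
+ * Editorial usage sketch: one common way to leave roughly 512 KiB of the
+ * current thread's stack to V8, assuming the stack grows downwards (as it
+ * does on the common platforms).
+ * \code
+ *   uintptr_t here = reinterpret_cast<uintptr_t>(&here);
+ *   isolate->SetStackLimit(here - 512 * 1024);
+ * \endcode
+ */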
+
+ /**
+ * Returns a memory range that can potentially contain jitted code. Code for
+ * V8's 'builtins' will not be in this range if embedded builtins is enabled.
+ *
+ * On Win64, embedders are advised to install function table callbacks for
+ * these ranges, as default SEH won't be able to unwind through jitted code.
+ * The first page of the code range is reserved for the embedder and is
+ * committed, writable, and executable, to be used to store unwind data, as
+ * documented in
+ * https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
+ *
+ * Might be empty on other platforms.
+ *
+ * https://code.google.com/p/v8/issues/detail?id=3598
+ */
+ void GetCodeRange(void** start, size_t* length_in_bytes);
+
+ /**
+ * As GetCodeRange, but for embedded builtins (these live in a distinct
+ * memory region from other V8 Code objects).
+ */
+ void GetEmbeddedCodeRange(const void** start, size_t* length_in_bytes);
+
+ /**
+ * Returns the JSEntryStubs necessary for use with the Unwinder API.
+ */
+ JSEntryStubs GetJSEntryStubs();
+
+ static constexpr size_t kMinCodePagesBufferSize = 32;
+
+ /**
+ * Copies the code heap pages currently in use by V8 into |code_pages_out|.
+ * |code_pages_out| must have at least kMinCodePagesBufferSize capacity and
+ * must be empty.
+ *
+ * Signal-safe, does not allocate, does not access the V8 heap.
+ * No code on the stack can rely on pages that might be missing.
+ *
+ * Returns the number of pages available to be copied, which might be greater
+ * than |capacity|. In this case, only |capacity| pages will be copied into
+ * |code_pages_out|. The caller should provide a bigger buffer on the next
+ * call in order to get all available code pages, but this is not required.
+ */
+ size_t CopyCodePages(size_t capacity, MemoryRange* code_pages_out);
+
+ /** Set the callback to invoke in case of fatal errors. */
+ void SetFatalErrorHandler(FatalErrorCallback that);
+
+ /** Set the callback to invoke in case of OOM errors. */
+ void SetOOMErrorHandler(OOMErrorCallback that);
+
+ /**
+ * Add a callback to invoke in case the heap size is close to the heap limit.
+ * If multiple callbacks are added, only the most recently added callback is
+ * invoked.
+ */
+ void AddNearHeapLimitCallback(NearHeapLimitCallback callback, void* data);
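+
+ /**
+ * Editorial usage sketch: granting extra headroom near the heap limit, for
+ * example to allow a heap snapshot to be captured before failing.
+ * |OnNearHeapLimit| is a hypothetical embedder function; the callback
+ * signature is defined by NearHeapLimitCallback in v8-callbacks.h.
+ * \code
+ *   static size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
+ *                                 size_t initial_heap_limit) {
+ *     return current_heap_limit + 16 * 1024 * 1024;
+ *   }
+ *   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
+ * \endcode
+ */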
+
+ /**
+ * Remove the given callback and restore the heap limit to the
+ * given limit. If the given limit is zero, then it is ignored.
+ * If the current heap size is greater than the given limit,
+ * then the heap limit is restored to the minimal limit that
+ * is possible for the current heap size.
+ */
+ void RemoveNearHeapLimitCallback(NearHeapLimitCallback callback,
+ size_t heap_limit);
+
+ /**
+ * If the heap limit was changed by the NearHeapLimitCallback, then the
+ * initial heap limit will be restored once the heap size falls below the
+ * given threshold percentage of the initial heap limit.
+ * The threshold percentage is a number in (0.0, 1.0) range.
+ */
+ void AutomaticallyRestoreInitialHeapLimit(double threshold_percent = 0.5);
+
+ /**
+ * Set the callback to invoke to check if code generation from
+ * strings should be allowed.
+ */
+ void SetModifyCodeGenerationFromStringsCallback(
+ ModifyCodeGenerationFromStringsCallback2 callback);
+
+ /**
+ * Set the callback to invoke to check if wasm code generation should
+ * be allowed.
+ */
+ void SetAllowWasmCodeGenerationCallback(
+ AllowWasmCodeGenerationCallback callback);
+
+ /**
+ * Embedder over{ride|load} injection points for wasm APIs. The expectation
+ * is that the embedder sets them at most once.
+ */
+ void SetWasmModuleCallback(ExtensionCallback callback);
+ void SetWasmInstanceCallback(ExtensionCallback callback);
+
+ void SetWasmStreamingCallback(WasmStreamingCallback callback);
+
+ void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
+
+ void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
+
+ void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+
+ void SetWasmDynamicTieringEnabledCallback(
+ WasmDynamicTieringEnabledCallback callback);
+
+ void SetSharedArrayBufferConstructorEnabledCallback(
+ SharedArrayBufferConstructorEnabledCallback callback);
+
+ /**
+ * This function can be called by the embedder to signal V8 that the dynamic
+ * enabling of features has finished. V8 can now set up dynamically added
+ * features.
+ */
+ void InstallConditionalFeatures(Local<Context> context);
+
+ /**
+ * Check if V8 is dead and therefore unusable. This is the case after
+ * fatal errors such as out-of-memory situations.
+ */
+ bool IsDead();
+
+ /**
+ * Adds a message listener (errors only).
+ *
+ * The same message listener can be added more than once and in that
+ * case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
+ */
+ bool AddMessageListener(MessageCallback that,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Adds a message listener.
+ *
+ * The same message listener can be added more than once and in that
+ * case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
+ *
+ * A listener can listen for particular error levels by providing a mask.
+ */
+ bool AddMessageListenerWithErrorLevel(MessageCallback that,
+ int message_levels,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Remove all message listeners from the specified callback function.
+ */
+ void RemoveMessageListeners(MessageCallback that);
+
+ /** Callback function for reporting failed access checks.*/
+ void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+
+ /**
+ * Tells V8 to capture the current stack trace when an uncaught exception
+ * occurs and report it to the message listeners. The option is off by
+ * default.
+ */
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit = 10,
+ StackTrace::StackTraceOptions options = StackTrace::kOverview);
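+
+ /**
+ * Editorial usage sketch: reporting uncaught exceptions with stack traces.
+ * |OnMessage| is a hypothetical embedder function.
+ * \code
+ *   static void OnMessage(v8::Local<v8::Message> message,
+ *                         v8::Local<v8::Value> data) {
+ *     // e.g. log message->Get() and message->GetStackTrace()
+ *   }
+ *   isolate->AddMessageListener(OnMessage);
+ *   isolate->SetCaptureStackTraceForUncaughtExceptions(true, 16);
+ * \endcode
+ */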
+
+ /**
+ * Iterates through all external resources referenced from current isolate
+ * heap. GC is not invoked prior to iterating, therefore there is no
+ * guarantee that visited objects are still alive.
+ */
+ void VisitExternalResources(ExternalResourceVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids.
+ */
+ void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids and are weak, marking them as inactive if there is no
+ * pending activity for the handle.
+ */
+ void VisitWeakHandles(PersistentHandleVisitor* visitor);
+
+ /**
+ * Check if this isolate is in use.
+ * True if at least one thread Enter'ed this isolate.
+ */
+ bool IsInUse();
+
+ /**
+ * Set whether calling Atomics.wait (a function that may block) is allowed in
+ * this isolate. This can also be configured via
+ * CreateParams::allow_atomics_wait.
+ */
+ void SetAllowAtomicsWait(bool allow);
+
+ /**
+ * Time zone redetection indicator for
+ * DateTimeConfigurationChangeNotification.
+ *
+ * kSkip indicates to V8 that the notification should not trigger redetecting
+ * the host time zone. kRedetect indicates to V8 that the host time zone
+ * should be redetected and used to set the default time zone.
+ *
+ * The host time zone detection may require file system access or similar
+ * operations unlikely to be available inside a sandbox. If v8 is run inside a
+ * sandbox, the host time zone has to be detected outside the sandbox before
+ * calling the DateTimeConfigurationChangeNotification function.
+ */
+ enum class TimeZoneDetection { kSkip, kRedetect };
+
+ /**
+ * Notification that the embedder has changed the time zone, daylight savings
+ * time or other date / time configuration parameters. V8 keeps a cache of
+ * various values used for date / time computation. This notification will
+ * reset those cached values for the current context so that date / time
+ * configuration changes would be reflected.
+ *
+ * This API should not be called more than needed as it will negatively impact
+ * the performance of date operations.
+ */
+ void DateTimeConfigurationChangeNotification(
+ TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip);
+
+ /**
+ * Notification that the embedder has changed the locale. V8 keeps a cache of
+ * various values used for locale computation. This notification will reset
+ * those cached values for the current context so that locale configuration
+ * changes would be reflected.
+ *
+ * This API should not be called more than needed as it will negatively impact
+ * the performance of locale operations.
+ */
+ void LocaleConfigurationChangeNotification();
+
+ Isolate() = delete;
+ ~Isolate() = delete;
+ Isolate(const Isolate&) = delete;
+ Isolate& operator=(const Isolate&) = delete;
+ // Deleting operator new and delete here is allowed as ctor and dtor are also
+ // deleted.
+ void* operator new(size_t size) = delete;
+ void* operator new[](size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+ void operator delete[](void*, size_t) = delete;
+
+ private:
+ template <class K, class V, class Traits>
+ friend class PersistentValueMapBase;
+
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
+ void ReportExternalAllocationLimitReached();
+};
+
+void Isolate::SetData(uint32_t slot, void* data) {
+ using I = internal::Internals;
+ I::SetEmbedderData(this, slot, data);
+}
+
+void* Isolate::GetData(uint32_t slot) {
+ using I = internal::Internals;
+ return I::GetEmbedderData(this, slot);
+}
+
+uint32_t Isolate::GetNumberOfDataSlots() {
+ using I = internal::Internals;
+ return I::kNumIsolateDataSlots;
+}
+
+template <class T>
+MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ISOLATE_H_
diff --git a/chromium/v8/include/v8-json.h b/chromium/v8/include/v8-json.h
new file mode 100644
index 00000000000..23d918fc973
--- /dev/null
+++ b/chromium/v8/include/v8-json.h
@@ -0,0 +1,47 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_JSON_H_
+#define INCLUDE_V8_JSON_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Value;
+class String;
+
+/**
+ * A JSON Parser and Stringifier.
+ */
+class V8_EXPORT JSON {
+ public:
+ /**
+ * Tries to parse the string |json_string| and returns it as a value if
+ * successful.
+ *
+ * \param context The context in which to parse and create the value.
+ * \param json_string The string to parse.
+ * \return The corresponding value if successfully parsed.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
+ Local<Context> context, Local<String> json_string);
+
+ /**
+ * Tries to stringify the JSON-serializable object |json_object| and returns
+ * it as a string if successful.
+ *
+ * \param json_object The JSON-serializable object to stringify.
+ * \return The corresponding string if successfully stringified.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
+ Local<Context> context, Local<Value> json_object,
+ Local<String> gap = Local<String>());
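+
+ /**
+ * Editorial usage sketch: a parse/stringify round trip, assuming |isolate|
+ * is entered and |context| is its current context.
+ * \code
+ *   v8::Local<v8::String> source =
+ *       v8::String::NewFromUtf8Literal(isolate, "{\"a\": 1}");
+ *   v8::Local<v8::Value> parsed;
+ *   v8::Local<v8::String> serialized;
+ *   if (v8::JSON::Parse(context, source).ToLocal(&parsed) &&
+ *       v8::JSON::Stringify(context, parsed).ToLocal(&serialized)) {
+ *     // |serialized| now holds the canonicalized JSON text
+ *   }
+ * \endcode
+ */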
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_JSON_H_
diff --git a/chromium/v8/include/v8-local-handle.h b/chromium/v8/include/v8-local-handle.h
new file mode 100644
index 00000000000..66a8e93af60
--- /dev/null
+++ b/chromium/v8/include/v8-local-handle.h
@@ -0,0 +1,459 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_LOCAL_HANDLE_H_
+#define INCLUDE_V8_LOCAL_HANDLE_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Boolean;
+template <class T>
+class BasicTracedReference;
+class Context;
+class EscapableHandleScope;
+template <class F>
+class Eternal;
+template <class F>
+class FunctionCallbackInfo;
+class Isolate;
+template <class F>
+class MaybeLocal;
+template <class T>
+class NonCopyablePersistentTraits;
+class Object;
+template <class T, class M = NonCopyablePersistentTraits<T>>
+class Persistent;
+template <class T>
+class PersistentBase;
+template <class F1, class F2, class F3>
+class PersistentValueMapBase;
+template <class F1, class F2>
+class PersistentValueVector;
+class Primitive;
+class Private;
+template <class F>
+class PropertyCallbackInfo;
+template <class F>
+class ReturnValue;
+class String;
+template <class F>
+class Traced;
+template <class F>
+class TracedGlobal;
+template <class F>
+class TracedReference;
+class TracedReferenceBase;
+class Utils;
+
+namespace internal {
+template <typename T>
+class CustomArguments;
+} // namespace internal
+
+namespace api_internal {
+// Called when ToLocalChecked is called on an empty Local.
+V8_EXPORT void ToLocalEmpty();
+} // namespace api_internal
+
+/**
+ * A stack-allocated class that governs a number of local handles.
+ * After a handle scope has been created, all local handles will be
+ * allocated within that handle scope until either the handle scope is
+ * deleted or another handle scope is created. If there is already a
+ * handle scope and a new one is created, all allocations will take
+ * place in the new handle scope until it is deleted. After that,
+ * new handles will again be allocated in the original handle scope.
+ *
+ * After the handle scope of a local handle has been deleted the
+ * garbage collector will no longer track the object stored in the
+ * handle and may deallocate it. The behavior of accessing a handle
+ * for which the handle scope has been deleted is undefined.
+ */
+class V8_EXPORT V8_NODISCARD HandleScope {
+ public:
+ explicit HandleScope(Isolate* isolate);
+
+ ~HandleScope();
+
+ /**
+ * Counts the number of allocated handles.
+ */
+ static int NumberOfHandles(Isolate* isolate);
+
+ V8_INLINE Isolate* GetIsolate() const {
+ return reinterpret_cast<Isolate*>(isolate_);
+ }
+
+ HandleScope(const HandleScope&) = delete;
+ void operator=(const HandleScope&) = delete;
+
+ protected:
+ V8_INLINE HandleScope() = default;
+
+ void Initialize(Isolate* isolate);
+
+ static internal::Address* CreateHandle(internal::Isolate* isolate,
+ internal::Address value);
+
+ private:
+ // Declaring operator new and delete as deleted is not spec compliant.
+ // Therefore declare them private instead to disable dynamic alloc
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Isolate* isolate_;
+ internal::Address* prev_next_;
+ internal::Address* prev_limit_;
+
+ // Local::New uses CreateHandle with an Isolate* parameter.
+ template <class F>
+ friend class Local;
+
+ // Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
+ // a HeapObject in their shortcuts.
+ friend class Object;
+ friend class Context;
+};
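+
+/**
+ * Editorial usage sketch (not upstream documentation): locals created inside
+ * a HandleScope are only valid while that scope is alive.
+ * \code
+ *   void Run(v8::Isolate* isolate) {
+ *     v8::HandleScope handle_scope(isolate);
+ *     v8::Local<v8::String> str =
+ *         v8::String::NewFromUtf8Literal(isolate, "hello");
+ *     // |str| must not be used after |handle_scope| is destroyed.
+ *   }
+ * \endcode
+ */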
+
+/**
+ * An object reference managed by the v8 garbage collector.
+ *
+ * All objects returned from v8 have to be tracked by the garbage collector so
+ * that it knows that the objects are still alive. Also, because the garbage
+ * collector may move objects, it is unsafe to point directly to an object.
+ * Instead, all objects are stored in handles which are known by the garbage
+ * collector and updated whenever an object moves. Handles should always be
+ * passed by value (except in cases like out-parameters) and they should never
+ * be allocated on the heap.
+ *
+ * There are two types of handles: local and persistent handles.
+ *
+ * Local handles are light-weight and transient and typically used in local
+ * operations. They are managed by HandleScopes. That means that a HandleScope
+ * must exist on the stack when they are created and that they are only valid
+ * inside of the HandleScope active during their creation. For passing a local
+ * handle to an outer HandleScope, an EscapableHandleScope and its Escape()
+ * method must be used.
+ *
+ * Persistent handles can be used when storing objects across several
+ * independent operations and have to be explicitly deallocated when they're no
+ * longer used.
+ *
+ * It is safe to extract the object stored in the handle by dereferencing the
+ * handle (for instance, to extract the Object* from a Local<Object>); the value
+ * will still be governed by a handle behind the scenes and the same rules apply
+ * to these values as to their handles.
+ */
+template <class T>
+class Local {
+ public:
+ V8_INLINE Local() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE Local(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
+ /**
+ * This check fails when trying to convert between incompatible
+ * handles. For example, converting from a Local<String> to a
+ * Local<Number>.
+ */
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Returns true if the handle is empty.
+ */
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * Sets the handle to be empty. IsEmpty() will then return true.
+ */
+ V8_INLINE void Clear() { val_ = nullptr; }
+
+ V8_INLINE T* operator->() const { return val_; }
+
+ V8_INLINE T* operator*() const { return val_; }
+
+ /**
+ * Checks whether two handles are the same.
+ * Returns true if both are empty, or if the objects to which they refer
+ * are identical.
+ *
+ * If both handles refer to JS objects, this is the same as strict equality.
+ * For primitives, such as numbers or strings, a `false` return value does not
+ * indicate that the values aren't equal in the JavaScript sense.
+ * Use `Value::StrictEquals()` to check primitives for equality.
+ */
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ /**
+ * Checks whether two handles are different.
+ * Returns true if only one of the handles is empty, or if
+ * the objects to which they refer are different.
+ *
+ * If both handles refer to JS objects, this is the same as strict
+ * non-equality. For primitives, such as numbers or strings, a `true` return
+ * value does not indicate that the values aren't equal in the JavaScript
+ * sense. Use `Value::StrictEquals()` to check primitives for equality.
+ */
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Persistent<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Cast a handle to a subclass, e.g. Local<Value> to Local<Object>.
+ * This is only valid if the handle actually refers to a value of the
+ * target type.
+ */
+ template <class S>
+ V8_INLINE static Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (that.IsEmpty()) return Local<T>();
+#endif
+ return Local<T>(T::Cast(*that));
+ }
+
+ /**
+ * Calling this is equivalent to Local<S>::Cast().
+ * In particular, this is only valid if the handle actually refers to a value
+ * of the target type.
+ */
+ template <class S>
+ V8_INLINE Local<S> As() const {
+ return Local<S>::Cast(*this);
+ }
+
+ /**
+ * Create a local handle for the content of another handle.
+ * The referee is kept alive by the local handle even when
+ * the original handle is destroyed/disposed.
+ */
+ V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that) {
+ return New(isolate, that.val_);
+ }
+
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const PersistentBase<T>& that) {
+ return New(isolate, that.val_);
+ }
+
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const BasicTracedReference<T>& that) {
+ return New(isolate, *that);
+ }
+
+ private:
+ friend class TracedReferenceBase;
+ friend class Utils;
+ template <class F>
+ friend class Eternal;
+ template <class F>
+ friend class PersistentBase;
+ template <class F, class M>
+ friend class Persistent;
+ template <class F>
+ friend class Local;
+ template <class F>
+ friend class MaybeLocal;
+ template <class F>
+ friend class FunctionCallbackInfo;
+ template <class F>
+ friend class PropertyCallbackInfo;
+ friend class String;
+ friend class Object;
+ friend class Context;
+ friend class Isolate;
+ friend class Private;
+ template <class F>
+ friend class internal::CustomArguments;
+ friend Local<Primitive> Undefined(Isolate* isolate);
+ friend Local<Primitive> Null(Isolate* isolate);
+ friend Local<Boolean> True(Isolate* isolate);
+ friend Local<Boolean> False(Isolate* isolate);
+ friend class HandleScope;
+ friend class EscapableHandleScope;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
+ template <class F1, class F2>
+ friend class PersistentValueVector;
+ template <class F>
+ friend class ReturnValue;
+ template <class F>
+ friend class Traced;
+ template <class F>
+ friend class TracedGlobal;
+ template <class F>
+ friend class BasicTracedReference;
+ template <class F>
+ friend class TracedReference;
+
+ explicit V8_INLINE Local(T* that) : val_(that) {}
+ V8_INLINE static Local<T> New(Isolate* isolate, T* that) {
+ if (that == nullptr) return Local<T>();
+ T* that_ptr = that;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
+ return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
+ reinterpret_cast<internal::Isolate*>(isolate), *p)));
+ }
+ T* val_;
+};
+
+#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+// Handle is an alias for Local for historical reasons.
+template <class T>
+using Handle = Local<T>;
+#endif
+
+/**
+ * A MaybeLocal<> is a wrapper around Local<> that enforces a check whether
+ * the Local<> is empty before it can be used.
+ *
+ * If an API method returns a MaybeLocal<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, an
+ * empty MaybeLocal is returned.
+ */
+template <class T>
+class MaybeLocal {
+ public:
+ V8_INLINE MaybeLocal() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE MaybeLocal(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
+ * |false| is returned and |out| is left untouched.
+ */
+ template <class S>
+ V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
+ out->val_ = IsEmpty() ? nullptr : this->val_;
+ return !IsEmpty();
+ }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
+ * V8 will crash the process.
+ */
+ V8_INLINE Local<T> ToLocalChecked() {
+ if (V8_UNLIKELY(val_ == nullptr)) api_internal::ToLocalEmpty();
+ return Local<T>(val_);
+ }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>, using a default value if this
+ * MaybeLocal<> is empty.
+ */
+ template <class S>
+ V8_INLINE Local<S> FromMaybe(Local<S> default_value) const {
+ return IsEmpty() ? default_value : Local<S>(val_);
+ }
+
+ private:
+ T* val_;
+};
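+
+/**
+ * Editorial usage sketch: the usual ToLocal() pattern, assuming |script| is a
+ * compiled v8::Script and |context| is the current context.
+ * \code
+ *   v8::Local<v8::Value> result;
+ *   if (!script->Run(context).ToLocal(&result)) {
+ *     // An exception is pending (or execution was terminated); bail out.
+ *     return;
+ *   }
+ * \endcode
+ */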
+
+/**
+ * A HandleScope which first allocates a handle in the current scope
+ * which will be later filled with the escape value.
+ */
+class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
+ public:
+ explicit EscapableHandleScope(Isolate* isolate);
+ V8_INLINE ~EscapableHandleScope() = default;
+
+ /**
+ * Pushes the value into the previous scope and returns a handle to it.
+ * Cannot be called twice.
+ */
+ template <class T>
+ V8_INLINE Local<T> Escape(Local<T> value) {
+ internal::Address* slot =
+ Escape(reinterpret_cast<internal::Address*>(*value));
+ return Local<T>(reinterpret_cast<T*>(slot));
+ }
+
+ template <class T>
+ V8_INLINE MaybeLocal<T> EscapeMaybe(MaybeLocal<T> value) {
+ return Escape(value.FromMaybe(Local<T>()));
+ }
+
+ EscapableHandleScope(const EscapableHandleScope&) = delete;
+ void operator=(const EscapableHandleScope&) = delete;
+
+ private:
+ // Declaring operator new and delete as deleted is not spec compliant.
+ // Therefore declare them private instead to disable dynamic alloc
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Address* Escape(internal::Address* escape_value);
+ internal::Address* escape_slot_;
+};
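+
+/**
+ * Editorial usage sketch: returning a local handle created inside a helper
+ * function to its caller's scope.
+ * \code
+ *   v8::Local<v8::Object> MakeObject(v8::Isolate* isolate) {
+ *     v8::EscapableHandleScope scope(isolate);
+ *     v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ *     return scope.Escape(obj);
+ *   }
+ * \endcode
+ */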
+
+/**
+ * A SealHandleScope acts like a handle scope in which no handle allocations
+ * are allowed. It can be useful for debugging handle leaks.
+ * Handles can be allocated within inner normal HandleScopes.
+ */
+class V8_EXPORT V8_NODISCARD SealHandleScope {
+ public:
+ explicit SealHandleScope(Isolate* isolate);
+ ~SealHandleScope();
+
+ SealHandleScope(const SealHandleScope&) = delete;
+ void operator=(const SealHandleScope&) = delete;
+
+ private:
+ // Declaring operator new and delete as deleted is not spec compliant.
+ // Therefore declare them private instead to disable dynamic alloc
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Isolate* const isolate_;
+ internal::Address* prev_limit_;
+ int prev_sealed_level_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_LOCAL_HANDLE_H_
diff --git a/chromium/v8/include/v8-locker.h b/chromium/v8/include/v8-locker.h
new file mode 100644
index 00000000000..360022b7d99
--- /dev/null
+++ b/chromium/v8/include/v8-locker.h
@@ -0,0 +1,148 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_LOCKER_H_
+#define INCLUDE_V8_LOCKER_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+namespace internal {
+class Isolate;
+} // namespace internal
+
+class Isolate;
+
+/**
+ * Multiple threads in V8 are allowed, but only one thread at a time is allowed
+ * to use any given V8 isolate, see the comments in the Isolate class. The
+ * definition of 'using a V8 isolate' includes accessing handles or holding onto
+ * object pointers obtained from V8 handles while in the particular V8 isolate.
+ * It is up to the user of V8 to ensure, perhaps with locking, that this
+ * constraint is not violated. In addition to any other synchronization
+ * mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
+ * used to signal thread switches to V8.
+ *
+ * v8::Locker is a scoped lock object. While it's active, i.e. between its
+ * construction and destruction, the current thread is allowed to use the locked
+ * isolate. V8 guarantees that an isolate can be locked by at most one thread at
+ * any time. In other words, the scope of a v8::Locker is a critical section.
+ *
+ * Sample usage:
+ * \code
+ * ...
+ * {
+ * v8::Locker locker(isolate);
+ * v8::Isolate::Scope isolate_scope(isolate);
+ * ...
+ * // Code using V8 and isolate goes here.
+ * ...
+ * } // Destructor called here
+ * \endcode
+ *
+ * If you wish to stop using V8 in a thread A you can do this either by
+ * destroying the v8::Locker object as above or by constructing a v8::Unlocker
+ * object:
+ *
+ * \code
+ * {
+ * isolate->Exit();
+ * v8::Unlocker unlocker(isolate);
+ * ...
+ * // Code not using V8 goes here while V8 can run in another thread.
+ * ...
+ * } // Destructor called here.
+ * isolate->Enter();
+ * \endcode
+ *
+ * The Unlocker object is intended for use in a long-running callback from V8,
+ * where you want to release the V8 lock for other threads to use.
+ *
+ * The v8::Locker is a recursive lock, i.e. you can lock more than once in a
+ * given thread. This can be useful if you have code that can be called either
+ * from code that holds the lock or from code that does not. The Unlocker is
+ * not recursive so you cannot have several Unlockers on the stack at once, and
+ * you cannot use an Unlocker in a thread that is not inside a Locker's scope.
+ *
+ * An unlocker will unlock several lockers if it has to and reinstate the
+ * correct depth of locking on its destruction, e.g.:
+ *
+ * \code
+ * // V8 not locked.
+ * {
+ * v8::Locker locker(isolate);
+ * Isolate::Scope isolate_scope(isolate);
+ * // V8 locked.
+ * {
+ * v8::Locker another_locker(isolate);
+ * // V8 still locked (2 levels).
+ * {
+ * isolate->Exit();
+ * v8::Unlocker unlocker(isolate);
+ * // V8 not locked.
+ * }
+ * isolate->Enter();
+ * // V8 locked again (2 levels).
+ * }
+ * // V8 still locked (1 level).
+ * }
+ * // V8 now no longer locked.
+ * \endcode
+ */
+class V8_EXPORT Unlocker {
+ public:
+ /**
+ * Initialize Unlocker for a given Isolate.
+ */
+ V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
+
+ ~Unlocker();
+
+ private:
+ void Initialize(Isolate* isolate);
+
+ internal::Isolate* isolate_;
+};
+
+class V8_EXPORT Locker {
+ public:
+ /**
+ * Initialize Locker for a given Isolate.
+ */
+ V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
+
+ ~Locker();
+
+ /**
+   * Returns whether or not the locker for a given isolate is locked by the
+ * current thread.
+ */
+ static bool IsLocked(Isolate* isolate);
+
+ /**
+ * Returns whether any v8::Locker has ever been used in this process.
+ * TODO(cbruni, chromium:1240851): Fix locking checks on a per-thread basis.
+ * The current implementation is quite confusing and leads to unexpected
+ * results if anybody uses v8::Locker in the current process.
+ */
+ static bool WasEverUsed();
+ V8_DEPRECATE_SOON("Use WasEverUsed instead")
+ static bool IsActive();
+
+ // Disallow copying and assigning.
+ Locker(const Locker&) = delete;
+ void operator=(const Locker&) = delete;
+
+ private:
+ void Initialize(Isolate* isolate);
+
+ bool has_lock_;
+ bool top_level_;
+ internal::Isolate* isolate_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_LOCKER_H_
diff --git a/chromium/v8/include/v8-maybe.h b/chromium/v8/include/v8-maybe.h
new file mode 100644
index 00000000000..0532a510059
--- /dev/null
+++ b/chromium/v8/include/v8-maybe.h
@@ -0,0 +1,137 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MAYBE_H_
+#define INCLUDE_V8_MAYBE_H_
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+namespace api_internal {
+// Called when ToChecked is called on an empty Maybe.
+V8_EXPORT void FromJustIsNothing();
+} // namespace api_internal
+
+/**
+ * A simple Maybe type, representing an object which may or may not have a
+ * value, see https://hackage.haskell.org/package/base/docs/Data-Maybe.html.
+ *
+ * If an API method returns a Maybe<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, a
+ * "Nothing" value is returned.
+ */
+template <class T>
+class Maybe {
+ public:
+ V8_INLINE bool IsNothing() const { return !has_value_; }
+ V8_INLINE bool IsJust() const { return has_value_; }
+
+ /**
+ * An alias for |FromJust|. Will crash if the Maybe<> is nothing.
+ */
+ V8_INLINE T ToChecked() const { return FromJust(); }
+
+ /**
+   * Shorthand for ToChecked(), which doesn't return a value. To be used where
+   * the actual value of the Maybe is not needed, e.g. for Object::Set.
+ */
+ V8_INLINE void Check() const {
+ if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing();
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T. If this Maybe<> is
+ * nothing (empty), |false| is returned and |out| is left untouched.
+ */
+ V8_WARN_UNUSED_RESULT V8_INLINE bool To(T* out) const {
+ if (V8_LIKELY(IsJust())) *out = value_;
+ return IsJust();
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T. If this Maybe<> is
+ * nothing (empty), V8 will crash the process.
+ */
+ V8_INLINE T FromJust() const {
+ if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing();
+ return value_;
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T, using a default value if this
+ * Maybe<> is nothing (empty).
+ */
+ V8_INLINE T FromMaybe(const T& default_value) const {
+ return has_value_ ? value_ : default_value;
+ }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return (IsJust() == other.IsJust()) &&
+ (!IsJust() || FromJust() == other.FromJust());
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ Maybe() : has_value_(false) {}
+ explicit Maybe(const T& t) : has_value_(true), value_(t) {}
+
+ bool has_value_;
+ T value_;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ template <class U>
+ friend Maybe<U> Just(const U& u);
+};
+
+template <class T>
+inline Maybe<T> Nothing() {
+ return Maybe<T>();
+}
+
+template <class T>
+inline Maybe<T> Just(const T& t) {
+ return Maybe<T>(t);
+}
+
+// A template specialization of Maybe<T> for the case of T = void.
+template <>
+class Maybe<void> {
+ public:
+ V8_INLINE bool IsNothing() const { return !is_valid_; }
+ V8_INLINE bool IsJust() const { return is_valid_; }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return IsJust() == other.IsJust();
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ struct JustTag {};
+
+ Maybe() : is_valid_(false) {}
+ explicit Maybe(JustTag) : is_valid_(true) {}
+
+ bool is_valid_;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ friend Maybe<void> JustVoid();
+};
+
+inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MAYBE_H_
diff --git a/chromium/v8/include/v8-memory-span.h b/chromium/v8/include/v8-memory-span.h
new file mode 100644
index 00000000000..b26af4f705b
--- /dev/null
+++ b/chromium/v8/include/v8-memory-span.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MEMORY_SPAN_H_
+#define INCLUDE_V8_MEMORY_SPAN_H_
+
+#include <stddef.h>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+/**
+ * Points to an unowned contiguous buffer holding a known number of elements.
+ *
+ * This is similar to std::span (standardized in C++20), but does not
+ * require advanced C++ support. In the (far) future, this may be replaced with
+ * or aliased to std::span.
+ *
+ * To facilitate future migration, this class exposes a subset of the interface
+ * implemented by std::span.
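+ *
+ * A minimal sketch:
+ * \code
+ *   int buffer[3] = {1, 2, 3};
+ *   v8::MemorySpan<int> span(buffer, 3);
+ *   // span.data() == buffer and span.size() == 3; the buffer remains owned
+ *   // by the caller.
+ * \endcode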
+ */
+template <typename T>
+class V8_EXPORT MemorySpan {
+ public:
+ /** The default constructor creates an empty span. */
+ constexpr MemorySpan() = default;
+
+ constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}
+
+ /** Returns a pointer to the beginning of the buffer. */
+ constexpr T* data() const { return data_; }
+ /** Returns the number of elements that the buffer holds. */
+ constexpr size_t size() const { return size_; }
+
+ private:
+ T* data_ = nullptr;
+ size_t size_ = 0;
+};
+
+} // namespace v8
+#endif // INCLUDE_V8_MEMORY_SPAN_H_
diff --git a/chromium/v8/include/v8-message.h b/chromium/v8/include/v8-message.h
new file mode 100644
index 00000000000..62b6bd92f93
--- /dev/null
+++ b/chromium/v8/include/v8-message.h
@@ -0,0 +1,237 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MESSAGE_H_
+#define INCLUDE_V8_MESSAGE_H_
+
+#include <stdio.h>
+
+#include <iosfwd>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Integer;
+class PrimitiveArray;
+class StackTrace;
+class String;
+class Value;
+
+/**
+ * The optional attributes of ScriptOrigin.
+ */
+class ScriptOriginOptions {
+ public:
+ V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
+ bool is_opaque = false, bool is_wasm = false,
+ bool is_module = false)
+ : flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
+ (is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
+ (is_module ? kIsModule : 0)) {}
+ V8_INLINE ScriptOriginOptions(int flags)
+ : flags_(flags &
+ (kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
+
+ bool IsSharedCrossOrigin() const {
+ return (flags_ & kIsSharedCrossOrigin) != 0;
+ }
+ bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
+ bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
+ bool IsModule() const { return (flags_ & kIsModule) != 0; }
+
+ int Flags() const { return flags_; }
+
+ private:
+ enum {
+ kIsSharedCrossOrigin = 1,
+ kIsOpaque = 1 << 1,
+ kIsWasm = 1 << 2,
+ kIsModule = 1 << 3
+ };
+ const int flags_;
+};
+
+/**
+ * The origin, within a file, of a script.
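+ *
+ * A minimal construction sketch using the isolate-taking constructor
+ * (|isolate| and |resource_name| are assumed to exist; line and column
+ * offsets default to 0):
+ * \code
+ *   v8::ScriptOrigin origin(isolate, resource_name);
+ * \endcode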
+ */
+class V8_EXPORT ScriptOrigin {
+ public:
+ V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
+ ScriptOrigin(
+ Local<Value> resource_name, Local<Integer> resource_line_offset,
+ Local<Integer> resource_column_offset,
+ Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
+ Local<Integer> script_id = Local<Integer>(),
+ Local<Value> source_map_url = Local<Value>(),
+ Local<Boolean> resource_is_opaque = Local<Boolean>(),
+ Local<Boolean> is_wasm = Local<Boolean>(),
+ Local<Boolean> is_module = Local<Boolean>(),
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_DEPRECATE_SOON("Use constructor that takes an isolate")
+ explicit ScriptOrigin(
+ Local<Value> resource_name, int resource_line_offset = 0,
+ int resource_column_offset = 0,
+ bool resource_is_shared_cross_origin = false, int script_id = -1,
+ Local<Value> source_map_url = Local<Value>(),
+ bool resource_is_opaque = false, bool is_wasm = false,
+ bool is_module = false,
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_INLINE ScriptOrigin(
+ Isolate* isolate, Local<Value> resource_name,
+ int resource_line_offset = 0, int resource_column_offset = 0,
+ bool resource_is_shared_cross_origin = false, int script_id = -1,
+ Local<Value> source_map_url = Local<Value>(),
+ bool resource_is_opaque = false, bool is_wasm = false,
+ bool is_module = false,
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>())
+ : isolate_(isolate),
+ resource_name_(resource_name),
+ resource_line_offset_(resource_line_offset),
+ resource_column_offset_(resource_column_offset),
+ options_(resource_is_shared_cross_origin, resource_is_opaque, is_wasm,
+ is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
+ V8_INLINE Local<Value> ResourceName() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ResourceLineOffset() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ResourceColumnOffset() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ScriptID() const;
+ V8_INLINE int LineOffset() const;
+ V8_INLINE int ColumnOffset() const;
+ V8_INLINE int ScriptId() const;
+ V8_INLINE Local<Value> SourceMapUrl() const;
+ V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const;
+ V8_INLINE ScriptOriginOptions Options() const { return options_; }
+
+ private:
+ Isolate* isolate_;
+ Local<Value> resource_name_;
+ int resource_line_offset_;
+ int resource_column_offset_;
+ ScriptOriginOptions options_;
+ int script_id_;
+ Local<Value> source_map_url_;
+ Local<PrimitiveArray> host_defined_options_;
+};
+
+/**
+ * An error message.
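+ *
+ * A minimal inspection sketch (|message| and |context| are assumed to come
+ * from the embedder, e.g. from a message callback):
+ * \code
+ *   int line = message->GetLineNumber(context).FromMaybe(
+ *       v8::Message::kNoLineNumberInfo);
+ *   v8::Local<v8::Value> name = message->GetScriptResourceName();
+ * \endcode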
+ */
+class V8_EXPORT Message {
+ public:
+ Local<String> Get() const;
+
+ /**
+ * Return the isolate to which the Message belongs.
+ */
+ Isolate* GetIsolate() const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
+ Local<Context> context) const;
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
+ Local<Context> context) const;
+
+ /**
+ * Returns the origin for the script from where the function causing the
+ * error originates.
+ */
+ ScriptOrigin GetScriptOrigin() const;
+
+ /**
+ * Returns the resource name for the script from where the function causing
+ * the error originates.
+ */
+ Local<Value> GetScriptResourceName() const;
+
+ /**
+ * Exception stack trace. By default stack traces are not captured for
+ * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
+   * changing this option.
+ */
+ Local<StackTrace> GetStackTrace() const;
+
+ /**
+ * Returns the number, 1-based, of the line where the error occurred.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
+
+ /**
+ * Returns the index within the script of the first character where
+ * the error occurred.
+ */
+ int GetStartPosition() const;
+
+ /**
+ * Returns the index within the script of the last character where
+ * the error occurred.
+ */
+ int GetEndPosition() const;
+
+ /**
+ * Returns the Wasm function index where the error occurred. Returns -1 if
+   * the message is not from a Wasm script.
+ */
+ int GetWasmFunctionIndex() const;
+
+ /**
+ * Returns the error level of the message.
+ */
+ int ErrorLevel() const;
+
+ /**
+ * Returns the index within the line of the first character where
+ * the error occurred.
+ */
+ int GetStartColumn() const;
+ V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
+
+ /**
+ * Returns the index within the line of the last character where
+ * the error occurred.
+ */
+ int GetEndColumn() const;
+ V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
+
+ /**
+ * Passes on the value set by the embedder when it fed the script from which
+ * this Message was generated to V8.
+ */
+ bool IsSharedCrossOrigin() const;
+ bool IsOpaque() const;
+
+ V8_DEPRECATE_SOON("Use the version that takes a std::ostream&.")
+ static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
+ static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out);
+
+ static const int kNoLineNumberInfo = 0;
+ static const int kNoColumnInfo = 0;
+ static const int kNoScriptIdInfo = 0;
+ static const int kNoWasmFunctionIndexInfo = -1;
+};
+
+Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
+
+Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
+ return host_defined_options_;
+}
+
+int ScriptOrigin::LineOffset() const { return resource_line_offset_; }
+
+int ScriptOrigin::ColumnOffset() const { return resource_column_offset_; }
+
+int ScriptOrigin::ScriptId() const { return script_id_; }
+
+Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MESSAGE_H_
diff --git a/chromium/v8/include/v8-metrics.h b/chromium/v8/include/v8-metrics.h
index a6eea6a8645..29e54401067 100644
--- a/chromium/v8/include/v8-metrics.h
+++ b/chromium/v8/include/v8-metrics.h
@@ -5,10 +5,19 @@
#ifndef V8_METRICS_H_
#define V8_METRICS_H_
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
namespace v8 {
+
+class Context;
+class Isolate;
+
namespace metrics {
struct GarbageCollectionPhases {
diff --git a/chromium/v8/include/v8-microtask-queue.h b/chromium/v8/include/v8-microtask-queue.h
new file mode 100644
index 00000000000..af9caa54a8f
--- /dev/null
+++ b/chromium/v8/include/v8-microtask-queue.h
@@ -0,0 +1,152 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MICROTASKS_QUEUE_H_
+#define INCLUDE_V8_MICROTASKS_QUEUE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+
+namespace internal {
+class Isolate;
+class MicrotaskQueue;
+} // namespace internal
+
+/**
+ * Represents the microtask queue, where microtasks are stored and processed.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#microtask-queue
+ * https://html.spec.whatwg.org/multipage/webappapis.html#enqueuejob(queuename,-job,-arguments)
+ * https://html.spec.whatwg.org/multipage/webappapis.html#perform-a-microtask-checkpoint
+ *
+ * A MicrotaskQueue instance may be associated with multiple Contexts by passing
+ * it to Context::New(), and they can be detached by Context::DetachGlobal().
+ * The embedder must keep the MicrotaskQueue instance alive until all associated
+ * Contexts are gone or detached.
+ *
+ * Use the same instance of MicrotaskQueue for all Contexts that may access each
+ * other synchronously. E.g. for Web embedding, use the same instance for all
+ * origins that share the same URL scheme and eTLD+1.
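+ *
+ * A minimal setup sketch (|isolate| is assumed to exist; the Context::New
+ * overload taking a MicrotaskQueue is declared in v8-context.h):
+ * \code
+ *   std::unique_ptr<v8::MicrotaskQueue> queue =
+ *       v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
+ *   // ... pass |queue| to every Context that should share it ...
+ *   queue->PerformCheckpoint(isolate);
+ * \endcode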
+ */
+class V8_EXPORT MicrotaskQueue {
+ public:
+ /**
+ * Creates an empty MicrotaskQueue instance.
+ */
+ static std::unique_ptr<MicrotaskQueue> New(
+ Isolate* isolate, MicrotasksPolicy policy = MicrotasksPolicy::kAuto);
+
+ virtual ~MicrotaskQueue() = default;
+
+ /**
+ * Enqueues the callback to the queue.
+ */
+ virtual void EnqueueMicrotask(Isolate* isolate,
+ Local<Function> microtask) = 0;
+
+ /**
+ * Enqueues the callback to the queue.
+ */
+ virtual void EnqueueMicrotask(v8::Isolate* isolate,
+ MicrotaskCallback callback,
+ void* data = nullptr) = 0;
+
+ /**
+ * Adds a callback to notify the embedder after microtasks were run. The
+ * callback is triggered by explicit RunMicrotasks call or automatic
+ * microtasks execution (see Isolate::SetMicrotasksPolicy).
+ *
+   * The callback will trigger even if microtasks were attempted to run but
+   * the microtask queue was empty and no single microtask was actually
+   * executed.
+ *
+ * Executing scripts inside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ virtual void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Removes callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ virtual void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Runs microtasks if no microtask is running on this MicrotaskQueue instance.
+ */
+ virtual void PerformCheckpoint(Isolate* isolate) = 0;
+
+ /**
+ * Returns true if a microtask is running on this MicrotaskQueue instance.
+ */
+ virtual bool IsRunningMicrotasks() const = 0;
+
+ /**
+ * Returns the current depth of nested MicrotasksScope that has
+ * kRunMicrotasks.
+ */
+ virtual int GetMicrotasksScopeDepth() const = 0;
+
+ MicrotaskQueue(const MicrotaskQueue&) = delete;
+ MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
+
+ private:
+ friend class internal::MicrotaskQueue;
+ MicrotaskQueue() = default;
+};
+
+/**
+ * This scope is used to control microtasks when MicrotasksPolicy::kScoped
+ * is used on Isolate. In this mode every non-primitive call to V8 should be
+ * done inside some MicrotasksScope.
+ * Microtasks are executed when the topmost MicrotasksScope marked as
+ * kRunMicrotasks exits.
+ * kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
+ * microtasks.
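+ *
+ * A minimal sketch under MicrotasksPolicy::kScoped (|isolate| is assumed to
+ * exist):
+ * \code
+ *   {
+ *     v8::MicrotasksScope scope(isolate, v8::MicrotasksScope::kRunMicrotasks);
+ *     // Call into V8 here. Microtasks run when the outermost
+ *     // kRunMicrotasks scope exits.
+ *   }
+ * \endcode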
+ */
+class V8_EXPORT V8_NODISCARD MicrotasksScope {
+ public:
+ enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
+
+ MicrotasksScope(Isolate* isolate, Type type);
+ MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
+ ~MicrotasksScope();
+
+ /**
+ * Runs microtasks if no kRunMicrotasks scope is currently active.
+ */
+ static void PerformCheckpoint(Isolate* isolate);
+
+ /**
+ * Returns current depth of nested kRunMicrotasks scopes.
+ */
+ static int GetCurrentDepth(Isolate* isolate);
+
+ /**
+ * Returns true while microtasks are being executed.
+ */
+ static bool IsRunningMicrotasks(Isolate* isolate);
+
+ // Prevent copying.
+ MicrotasksScope(const MicrotasksScope&) = delete;
+ MicrotasksScope& operator=(const MicrotasksScope&) = delete;
+
+ private:
+ internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
+ bool run_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MICROTASKS_QUEUE_H_
diff --git a/chromium/v8/include/v8-microtask.h b/chromium/v8/include/v8-microtask.h
new file mode 100644
index 00000000000..c159203608d
--- /dev/null
+++ b/chromium/v8/include/v8-microtask.h
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MICROTASK_H_
+#define INCLUDE_V8_MICROTASK_H_
+
+namespace v8 {
+
+class Isolate;
+
+// --- Microtasks Callbacks ---
+using MicrotasksCompletedCallbackWithData = void (*)(Isolate*, void*);
+using MicrotaskCallback = void (*)(void* data);
+
+/**
+ * Policy for running microtasks:
+ * - explicit: microtasks are invoked with the
+ * Isolate::PerformMicrotaskCheckpoint() method;
+ * - scoped: microtasks invocation is controlled by MicrotasksScope objects;
+ * - auto: microtasks are invoked when the script call depth decrements
+ * to zero.
+ */
+enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MICROTASK_H_
diff --git a/chromium/v8/include/v8-object.h b/chromium/v8/include/v8-object.h
new file mode 100644
index 00000000000..114e452a380
--- /dev/null
+++ b/chromium/v8/include/v8-object.h
@@ -0,0 +1,770 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_OBJECT_H_
+#define INCLUDE_V8_OBJECT_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Array;
+class Function;
+class FunctionTemplate;
+template <typename T>
+class PropertyCallbackInfo;
+
+/**
+ * A private symbol
+ *
+ * This is an experimental feature. Use at your own risk.
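+ *
+ * A minimal sketch of tagging an object with embedder data (|isolate|,
+ * |context|, |obj| and |value| are assumed to exist; the key string is
+ * illustrative):
+ * \code
+ *   v8::Local<v8::Private> key = v8::Private::ForApi(
+ *       isolate, v8::String::NewFromUtf8Literal(isolate, "MyEmbedder#tag"));
+ *   obj->SetPrivate(context, key, value).Check();
+ * \endcode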
+ */
+class V8_EXPORT Private : public Data {
+ public:
+ /**
+ * Returns the print name string of the private symbol, or undefined if none.
+ */
+ Local<Value> Name() const;
+
+ /**
+ * Create a private symbol. If name is not empty, it will be the description.
+ */
+ static Local<Private> New(Isolate* isolate,
+ Local<String> name = Local<String>());
+
+ /**
+ * Retrieve a global private symbol. If a symbol with this name has not
+ * been retrieved in the same isolate before, it is created.
+ * Note that private symbols created this way are never collected, so
+ * they should only be used for statically fixed properties.
+ * Also, there is only one global name space for the names used as keys.
+ * To minimize the potential for clashes, use qualified names as keys,
+ * e.g., "Class#property".
+ */
+ static Local<Private> ForApi(Isolate* isolate, Local<String> name);
+
+ V8_INLINE static Private* Cast(Data* data);
+
+ private:
+ Private();
+
+ static void CheckCast(Data* that);
+};
+
+/**
+ * An instance of a Property Descriptor, see Ecma-262 6.2.4.
+ *
+ * Properties in a descriptor are present or absent. If you do not set
+ * `enumerable`, `configurable`, and `writable`, they are absent. If `value`,
+ * `get`, or `set` are absent, but you must specify them in the constructor, use
+ * empty handles.
+ *
+ * Accessors `get` and `set` must be callable or undefined if they are present.
+ *
+ * \note Only query properties if they are present, i.e., call `x()` only if
+ * `has_x()` returns true.
+ *
+ * \code
+ * // var desc = {writable: false}
+ * v8::PropertyDescriptor d(Local<Value>(), false);
+ * d.value(); // error, value not set
+ * if (d.has_writable()) {
+ * d.writable(); // false
+ * }
+ *
+ * // var desc = {value: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate));
+ *
+ * // var desc = {get: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate), Local<Value>());
+ * \endcode
+ */
+class V8_EXPORT PropertyDescriptor {
+ public:
+ // GenericDescriptor
+ PropertyDescriptor();
+
+ // DataDescriptor
+ explicit PropertyDescriptor(Local<Value> value);
+
+ // DataDescriptor with writable property
+ PropertyDescriptor(Local<Value> value, bool writable);
+
+ // AccessorDescriptor
+ PropertyDescriptor(Local<Value> get, Local<Value> set);
+
+ ~PropertyDescriptor();
+
+ Local<Value> value() const;
+ bool has_value() const;
+
+ Local<Value> get() const;
+ bool has_get() const;
+ Local<Value> set() const;
+ bool has_set() const;
+
+ void set_enumerable(bool enumerable);
+ bool enumerable() const;
+ bool has_enumerable() const;
+
+ void set_configurable(bool configurable);
+ bool configurable() const;
+ bool has_configurable() const;
+
+ bool writable() const;
+ bool has_writable() const;
+
+ struct PrivateData;
+ PrivateData* get_private() const { return private_; }
+
+ PropertyDescriptor(const PropertyDescriptor&) = delete;
+ void operator=(const PropertyDescriptor&) = delete;
+
+ private:
+ PrivateData* private_;
+};
+
+/**
+ * PropertyAttribute.
+ */
+enum PropertyAttribute {
+ /** None. **/
+ None = 0,
+ /** ReadOnly, i.e., not writable. **/
+ ReadOnly = 1 << 0,
+ /** DontEnum, i.e., not enumerable. **/
+ DontEnum = 1 << 1,
+ /** DontDelete, i.e., not configurable. **/
+ DontDelete = 1 << 2
+};
+
+/**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See Object and ObjectTemplate's
+ * method SetAccessor.
+ */
+using AccessorGetterCallback =
+ void (*)(Local<String> property, const PropertyCallbackInfo<Value>& info);
+using AccessorNameGetterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+using AccessorSetterCallback = void (*)(Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
+using AccessorNameSetterCallback =
+ void (*)(Local<Name> property, Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
+
+/**
+ * Access control specifications.
+ *
+ * Some accessors should be accessible across contexts. These
+ * accessors have an explicit access control parameter which specifies
+ * the kind of cross-context access that should be allowed.
+ *
+ * TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused.
+ */
+enum AccessControl {
+ DEFAULT = 0,
+ ALL_CAN_READ = 1,
+ ALL_CAN_WRITE = 1 << 1,
+ PROHIBITS_OVERWRITING = 1 << 2
+};
+
+/**
+ * Property filter bits. They can be or'ed to build a composite filter.
+ */
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16
+};
+
+/**
+ * Options for marking whether callbacks may trigger JS-observable side effects.
+ * Side-effect-free callbacks are allowlisted during debug evaluation with
+ * throwOnSideEffect. It applies when calling a Function, FunctionTemplate,
+ * or an Accessor callback. For Interceptors, please see
+ * PropertyHandlerFlags's kHasNoSideEffect.
+ * Callbacks that only cause side effects to the receiver are allowlisted if
+ * invoked on receiver objects that are created within the same debug-evaluate
+ * call, as these objects are temporary and the side effect does not escape.
+ */
+enum class SideEffectType {
+ kHasSideEffect,
+ kHasNoSideEffect,
+ kHasSideEffectToReceiver
+};
+
+/**
+ * Keys/Properties filter enums:
+ *
+ * KeyCollectionMode limits the range of collected properties. kOwnOnly limits
+ * the collected properties to the given Object only. kIncludePrototypes will
+ * include all keys of the object's prototype chain as well.
+ */
+enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
+
+/**
+ * kIncludeIndices allows for integer indices to be collected, while
+ * kSkipIndices will exclude integer indices from being collected.
+ */
+enum class IndexFilter { kIncludeIndices, kSkipIndices };
+
+/**
+ * kConvertToString will convert integer indices to strings.
+ * kKeepNumbers will return numbers for integer indices.
+ */
+enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers };
+
+/**
+ * Integrity level for objects.
+ */
+enum class IntegrityLevel { kFrozen, kSealed };
+
+/**
+ * A JavaScript object (ECMA-262, 4.3.3)
+ */
+class V8_EXPORT Object : public Value {
+ public:
+ /**
+   * Set only returns Just(true) or Nothing(), so if it should never fail, use
+ * result.Check().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
+ Local<Value> key, Local<Value> value);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
+ Local<Value> value);
+
+ // Implements CreateDataProperty (ECMA-262, 7.3.4).
+ //
+ // Defines a configurable, writable, enumerable property with the given value
+ // on the object unless the property already exists and is not configurable
+ // or the object is not extensible.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
+ Local<Name> key,
+ Local<Value> value);
+ V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
+ uint32_t index,
+ Local<Value> value);
+
+ // Implements DefineOwnProperty.
+ //
+  // In general, CreateDataProperty will be faster; however, it does not allow
+ // for specifying attributes.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
+ Local<Context> context, Local<Name> key, Local<Value> value,
+ PropertyAttribute attributes = None);
+
+ // Implements Object.DefineProperty(O, P, Attributes), see Ecma-262 19.1.2.4.
+ //
+ // The defineProperty function is used to add an own property or
+ // update the attributes of an existing own property of an object.
+ //
+ // Both data and accessor descriptors can be used.
+ //
+  // In general, CreateDataProperty is faster; however, it does not allow
+ // for specifying attributes or an accessor descriptor.
+ //
+ // The PropertyDescriptor can change when redefining a property.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
+ Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ uint32_t index);
+
+ /**
+ * Gets the property attributes of a property which can be None or
+ * any combination of ReadOnly, DontEnum and DontDelete. Returns
+ * None when the property doesn't exist.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
+ Local<Context> context, Local<Value> key);
+
+ /**
+ * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Object::Has() calls the abstract operation HasProperty(O, P) described
+   * in ECMA-262, 7.3.10. Has() returns true if the object has the property,
+   * either own or on the prototype chain.
+ * Interceptors, i.e., PropertyQueryCallbacks, are called if present.
+ *
+ * Has() has the same side effects as JavaScript's `variable in object`.
+ * For example, calling Has() on a revoked proxy will throw an exception.
+ *
+ * \note Has() converts the key to a name, which possibly calls back into
+ * JavaScript.
+ *
+ * See also v8::Object::HasOwnProperty() and
+ * v8::Object::HasRealNamedProperty().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ uint32_t index);
+
+ /**
+ * Note: SideEffectType affects the getter only, not the setter.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ MaybeLocal<Value> data = MaybeLocal<Value>(),
+ AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ void SetAccessorProperty(Local<Name> name, Local<Function> getter,
+ Local<Function> setter = Local<Function>(),
+ PropertyAttribute attribute = None,
+ AccessControl settings = DEFAULT);
+
+ /**
+ * Sets a native data property like Template::SetNativeDataProperty, but
+ * this method sets on this object directly.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetNativeDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attributes = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Attempts to create a property with the given name which behaves like a data
+ * property, except that the provided getter is invoked (and provided with the
+ * data value) to supply its value the first time it is read. After the
+ * property is accessed once, it is replaced with an ordinary data property.
+ *
+ * Analogous to Template::SetLazyDataProperty.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
+ PropertyAttribute attributes = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Functionality for private properties.
+ * This is an experimental feature, use at your own risk.
+ * Note: Private properties are not inherited. Do not rely on this, since it
+ * may change.
+ */
+ Maybe<bool> HasPrivate(Local<Context> context, Local<Private> key);
+ Maybe<bool> SetPrivate(Local<Context> context, Local<Private> key,
+ Local<Value> value);
+ Maybe<bool> DeletePrivate(Local<Context> context, Local<Private> key);
+ MaybeLocal<Value> GetPrivate(Local<Context> context, Local<Private> key);
+
+ /**
+ * Returns an array containing the names of the enumerable properties
+ * of this object, including properties from prototype objects. The
+ * array returned by this method contains the same values as would
+ * be enumerated by a for-in statement over this object.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
+ Local<Context> context);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
+ Local<Context> context, KeyCollectionMode mode,
+ PropertyFilter property_filter, IndexFilter index_filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
+
+ /**
+ * This function has the same functionality as GetPropertyNames but
+ * the returned array doesn't contain the names of properties from
+ * prototype objects.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
+ Local<Context> context);
+
+ /**
+ * Returns an array containing the names of the filtered properties
+ * of this object, including properties from prototype objects. The
+ * array returned by this method contains the same values as would
+ * be enumerated by a for-in statement over this object.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
+ Local<Context> context, PropertyFilter filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
+
+ /**
+ * Get the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ Local<Value> GetPrototype();
+
+ /**
+ * Set the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
+ Local<Value> prototype);
+
+ /**
+ * Finds an instance of the given function template in the prototype
+ * chain.
+ */
+ Local<Object> FindInstanceInPrototypeChain(Local<FunctionTemplate> tmpl);
+
+ /**
+ * Call builtin Object.prototype.toString on this object.
+ * This is different from Value::ToString() that may call
+ * user-defined toString function. This one does not.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
+ Local<Context> context);
+
+ /**
+ * Returns the name of the function invoked as a constructor for this object.
+ */
+ Local<String> GetConstructorName();
+
+ /**
+ * Sets the integrity level of the object.
+ */
+ Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
+
+ /** Gets the number of internal fields for this Object. */
+ int InternalFieldCount() const;
+
+ /** Same as above, but works for PersistentBase. */
+ V8_INLINE static int InternalFieldCount(
+ const PersistentBase<Object>& object) {
+ return object.val_->InternalFieldCount();
+ }
+
+ /** Same as above, but works for BasicTracedReference. */
+ V8_INLINE static int InternalFieldCount(
+ const BasicTracedReference<Object>& object) {
+ return object->InternalFieldCount();
+ }
+
+ /** Gets the value from an internal field. */
+ V8_INLINE Local<Value> GetInternalField(int index);
+
+ /** Sets the value in an internal field. */
+ void SetInternalField(int index, Local<Value> value);
+
+ /**
+ * Gets a 2-byte-aligned native pointer from an internal field. This field
+   * must have been set by SetAlignedPointerInInternalField; everything else
+ * leads to undefined behavior.
+ */
+ V8_INLINE void* GetAlignedPointerFromInternalField(int index);
+
+ /** Same as above, but works for PersistentBase. */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const PersistentBase<Object>& object, int index) {
+ return object.val_->GetAlignedPointerFromInternalField(index);
+ }
+
+  /** Same as above, but works for BasicTracedReference. */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const BasicTracedReference<Object>& object, int index) {
+ return object->GetAlignedPointerFromInternalField(index);
+ }
+
+ /**
+ * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
+   * a field, GetAlignedPointerFromInternalField must be used; everything else
+ * leads to undefined behavior.
+ */
+ void SetAlignedPointerInInternalField(int index, void* value);
+ void SetAlignedPointerInInternalFields(int argc, int indices[],
+ void* values[]);
+
+ /**
+ * HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
+ *
+ * See also v8::Object::Has() and v8::Object::HasRealNamedProperty().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
+ Local<Name> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
+ uint32_t index);
+ /**
+ * Use HasRealNamedProperty() if you want to check if an object has an own
+ * property without causing side effects, i.e., without calling interceptors.
+ *
+ * This function is similar to v8::Object::HasOwnProperty(), but it does not
+ * call interceptors.
+ *
+ * \note Consider using non-masking interceptors, i.e., the interceptors are
+ * not called if the receiver has the real named property. See
+ * `v8::PropertyHandlerFlags::kNonMasking`.
+ *
+ * See also v8::Object::Has().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
+ Local<Name> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
+ Local<Context> context, uint32_t index);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * If result.IsEmpty() no real property was located in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property in the prototype chain,
+ * which can be None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute>
+ GetRealNamedPropertyAttributesInPrototypeChain(Local<Context> context,
+ Local<Name> key);
+
+ /**
+ * If result.IsEmpty() no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property which can be
+ * None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Local<Context> context, Local<Name> key);
+
+ /** Tests for a named lookup interceptor.*/
+ bool HasNamedLookupInterceptor() const;
+
+ /** Tests for an index lookup interceptor.*/
+ bool HasIndexedLookupInterceptor() const;
+
+ /**
+ * Returns the identity hash for this object. The current implementation
+ * uses a hidden property on the object to store the identity hash.
+ *
+ * The return value will never be 0. Also, it is not guaranteed to be
+ * unique.
+ */
+ int GetIdentityHash();
+
+ /**
+ * Clone this object with a fast but shallow copy. Values will point
+ * to the same values as the original object.
+ */
+ // TODO(dcarney): take an isolate and optionally bail out?
+ Local<Object> Clone();
+
+ /**
+ * Returns the context in which the object was created.
+ */
+ V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
+ Local<Context> CreationContext();
+ MaybeLocal<Context> GetCreationContext();
+
+ /** Same as above, but works for Persistents */
+ V8_DEPRECATE_SOON(
+ "Use MaybeLocal<Context> GetCreationContext(const "
+ "PersistentBase<Object>& object)")
+ static Local<Context> CreationContext(const PersistentBase<Object>& object);
+ V8_INLINE static MaybeLocal<Context> GetCreationContext(
+ const PersistentBase<Object>& object) {
+ return object.val_->GetCreationContext();
+ }
+
+ /**
+ * Checks whether a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * When an Object is callable this method returns true.
+ */
+ bool IsCallable() const;
+
+ /**
+ * True if this object is a constructor.
+ */
+ bool IsConstructor() const;
+
+ /**
+ * True if this object can carry information relevant to the embedder in its
+ * embedder fields, false otherwise. This is generally true for objects
+ * constructed through function templates but also holds for other types where
+ * V8 automatically adds internal fields at compile time, such as e.g.
+ * v8::ArrayBuffer.
+ */
+ bool IsApiWrapper() const;
+
+ /**
+ * True if this object was created from an object template which was marked
+ * as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
+ * information.
+ */
+ bool IsUndetectable() const;
+
+ /**
+ * Call an Object as a function if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
+ Local<Value> recv,
+ int argc,
+ Local<Value> argv[]);
+
+ /**
+ * Call an Object as a constructor if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * Note: This method behaves like the Function::NewInstance method.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
+ Local<Context> context, int argc, Local<Value> argv[]);
+
+ /**
+   * Return the isolate to which the Object belongs.
+ */
+ Isolate* GetIsolate();
+
+ /**
+ * If this object is a Set, Map, WeakSet or WeakMap, this returns a
+ * representation of the elements of this object as an array.
+ * If this object is a SetIterator or MapIterator, this returns all
+ * elements of the underlying collection, starting at the iterator's current
+ * position.
+ * For other types, this will return an empty MaybeLocal<Array> (without
+ * scheduling an exception).
+ */
+ MaybeLocal<Array> PreviewEntries(bool* is_key_value);
+
+ static Local<Object> New(Isolate* isolate);
+
+ /**
+ * Creates a JavaScript object with the given properties, and
+   * the given prototype_or_null (which can be any JavaScript
+ * value, and if it's null, the newly created object won't have
+ * a prototype at all). This is similar to Object.create().
+ * All properties will be created as enumerable, configurable
+ * and writable properties.
+ */
+ static Local<Object> New(Isolate* isolate, Local<Value> prototype_or_null,
+ Local<Name>* names, Local<Value>* values,
+ size_t length);
+
+ V8_INLINE static Object* Cast(Value* obj);
+
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+   * This API allows querying whether an object was constructed from a
+ * "code like" ObjectTemplate.
+ *
+ * See also: v8::ObjectTemplate::SetCodeLike
+ */
+ bool IsCodeLike(Isolate* isolate) const;
+
+ private:
+ Object();
+ static void CheckCast(Value* obj);
+ Local<Value> SlowGetInternalField(int index);
+ void* SlowGetAlignedPointerFromInternalField(int index);
+};
+
+// --- Implementation ---
+
+Local<Value> Object::GetInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<A*>(this);
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
+ int instance_type = I::GetInstanceType(obj);
+ if (v8::internal::CanHaveInternalField(instance_type)) {
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
+ A value = I::ReadRawField<A>(obj, offset);
+#ifdef V8_COMPRESS_POINTERS
+ // We read the full pointer value and then decompress it in order to avoid
+    // dealing with potential endianness issues.
+ value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+#endif
+ internal::Isolate* isolate =
+ internal::IsolateFromNeverReadOnlySpaceObject(obj);
+ A* result = HandleScope::CreateHandle(isolate, value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+ }
+#endif
+ return SlowGetInternalField(index);
+}
+
+void* Object::GetAlignedPointerFromInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<A*>(this);
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
+ auto instance_type = I::GetInstanceType(obj);
+ if (v8::internal::CanHaveInternalField(instance_type)) {
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value = I::ReadExternalPointerField(
+ isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
+ return reinterpret_cast<void*>(value);
+ }
+#endif
+ return SlowGetAlignedPointerFromInternalField(index);
+}
+
+Private* Private::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Private*>(data);
+}
+
+Object* Object::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Object*>(value);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_OBJECT_H_
diff --git a/chromium/v8/include/v8-persistent-handle.h b/chromium/v8/include/v8-persistent-handle.h
new file mode 100644
index 00000000000..a6c21268d6a
--- /dev/null
+++ b/chromium/v8/include/v8-persistent-handle.h
@@ -0,0 +1,590 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PERSISTENT_HANDLE_H_
+#define INCLUDE_V8_PERSISTENT_HANDLE_H_
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-weak-callback-info.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+template <class K, class V, class T>
+class PersistentValueMapBase;
+template <class V, class T>
+class PersistentValueVector;
+template <class T>
+class Global;
+template <class T>
+class PersistentBase;
+template <class K, class V, class T>
+class PersistentValueMap;
+class Value;
+
+namespace api_internal {
+V8_EXPORT Value* Eternalize(v8::Isolate* isolate, Value* handle);
+V8_EXPORT internal::Address* CopyGlobalReference(internal::Address* from);
+V8_EXPORT void DisposeGlobal(internal::Address* global_handle);
+V8_EXPORT void MakeWeak(internal::Address** location_addr);
+V8_EXPORT void* ClearWeak(internal::Address* location);
+V8_EXPORT void AnnotateStrongRetainer(internal::Address* location,
+ const char* label);
+V8_EXPORT internal::Address* GlobalizeReference(internal::Isolate* isolate,
+ internal::Address* handle);
+V8_EXPORT void MoveGlobalReference(internal::Address** from,
+ internal::Address** to);
+} // namespace api_internal
+
+/**
+ * Eternal handles are set-once handles that live for the lifetime of the
+ * isolate.
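+ *
+ * A minimal sketch (|isolate| and |str| are assumed to exist):
+ * \code
+ *   v8::Eternal<v8::String> eternal;
+ *   eternal.Set(isolate, str);
+ *   // Later, on the same isolate:
+ *   v8::Local<v8::String> local = eternal.Get(isolate);
+ * \endcode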
+ */
+template <class T>
+class Eternal {
+ public:
+ V8_INLINE Eternal() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : val_(nullptr) {
+ Set(isolate, handle);
+ }
+ // Can only be safely called if already set.
+ V8_INLINE Local<T> Get(Isolate* isolate) const {
+ // The eternal handle will never go away, so as with the roots, we don't
+ // even need to open a handle.
+ return Local<T>(val_);
+ }
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ template <class S>
+ void Set(Isolate* isolate, Local<S> handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ val_ = reinterpret_cast<T*>(
+ api_internal::Eternalize(isolate, reinterpret_cast<Value*>(*handle)));
+ }
+
+ private:
+ T* val_;
+};
+
+namespace api_internal {
+V8_EXPORT void MakeWeak(internal::Address* location, void* data,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type);
+} // namespace api_internal
+
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope in which it was
+ * allocated, a PersistentBase handle remains valid until it is explicitly
+ * disposed using Reset().
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the V8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using the constructor or PersistentBase::Reset and
+ * existing handles can be disposed using PersistentBase::Reset.
+ *
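+ * A minimal sketch using v8::Global, a PersistentBase subclass defined later
+ * in this header (|isolate| and |obj| are assumed to exist):
+ * \code
+ *   v8::Global<v8::Object> global(isolate, obj);
+ *   // ... later, possibly after the original HandleScope is gone ...
+ *   {
+ *     v8::HandleScope scope(isolate);
+ *     v8::Local<v8::Object> local = global.Get(isolate);
+ *   }
+ *   global.Reset();  // Dispose the underlying storage cell.
+ * \endcode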
+ */
+template <class T>
+class PersistentBase {
+ public:
+ /**
+   * If non-empty, destroy the underlying storage cell.
+ * IsEmpty() will return true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+ V8_INLINE void Empty() { val_ = 0; }
+
+ V8_INLINE Local<T> Get(Isolate* isolate) const {
+ return Local<T>::New(isolate, *this);
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Install a finalization callback on this object.
+ * NOTE: There is no guarantee as to *when* or even *if* the callback is
+ * invoked. The invocation is performed solely on a best effort basis.
+ * As always, GC-based finalization should *not* be relied upon for any
+ * critical form of resource management!
+ *
+ * The callback is supposed to reset the handle. No further V8 API may be
+ * called in this callback. In case additional work involving V8 needs to be
+ * done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ template <typename P>
+ V8_INLINE void SetWeak(P* parameter,
+ typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type);
+
+ /**
+ * Turns this handle into a weak phantom handle without finalization callback.
+ * The handle will be reset automatically when the garbage collector detects
+ * that the object is no longer reachable.
+ * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall
+ * returns how many phantom handles were reset by the garbage collector.
+ */
+ V8_INLINE void SetWeak();
+
+ template <typename P>
+ V8_INLINE P* ClearWeak();
+
+ // TODO(dcarney): remove this.
+ V8_INLINE void ClearWeak() { ClearWeak<void>(); }
+
+ /**
+ * Annotates the strong handle with the given label, which is then used by the
+ * heap snapshot generator as a name of the edge from the root to the handle.
+ * The function does not take ownership of the label and assumes that the
+ * label is valid as long as the handle is valid.
+ */
+ V8_INLINE void AnnotateStrongRetainer(const char* label);
+
+ /** Returns true if the handle's reference is weak. */
+ V8_INLINE bool IsWeak() const;
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ PersistentBase(const PersistentBase& other) = delete;
+ void operator=(const PersistentBase&) = delete;
+
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template <class F>
+ friend class Local;
+ template <class F1, class F2>
+ friend class Persistent;
+ template <class F>
+ friend class Global;
+ template <class F>
+ friend class PersistentBase;
+ template <class F>
+ friend class ReturnValue;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
+ template <class F1, class F2>
+ friend class PersistentValueVector;
+ friend class Object;
+
+ explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
+ V8_INLINE static T* New(Isolate* isolate, T* that);
+
+ T* val_;
+};
+
+/**
+ * Default traits for Persistent. This class does not allow
+ * use of the copy constructor or assignment operator.
+ * At present kResetInDestructor is not set, but that will change in a future
+ * version.
+ */
+template <class T>
+class NonCopyablePersistentTraits {
+ public:
+ using NonCopyablePersistent = Persistent<T, NonCopyablePersistentTraits<T>>;
+ static const bool kResetInDestructor = false;
+ template <class S, class M>
+ V8_INLINE static void Copy(const Persistent<S, M>& source,
+ NonCopyablePersistent* dest) {
+ static_assert(sizeof(S) < 0,
+ "NonCopyablePersistentTraits::Copy is not instantiable");
+ }
+};
+
+/**
+ * Helper class traits to allow copying and assignment of Persistent.
+ * This will clone the contents of the storage cell, but not any of the flags, etc.
+ */
+template <class T>
+struct CopyablePersistentTraits {
+ using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
+ static const bool kResetInDestructor = true;
+ template <class S, class M>
+ static V8_INLINE void Copy(const Persistent<S, M>& source,
+ CopyablePersistent* dest) {
+ // do nothing, just allow copy
+ }
+};
+
+/**
+ * A PersistentBase which allows copy and assignment.
+ *
+ * Copy, assignment and destructor behavior is controlled by the traits
+ * class M.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template <class T, class M>
+class Persistent : public PersistentBase<T> {
+ public:
+ /**
+ * A Persistent with no storage cell.
+ */
+ V8_INLINE Persistent() : PersistentBase<T>(nullptr) {}
+ /**
+ * Construct a Persistent from a Local.
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Persistent(Isolate* isolate, Local<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ /**
+ * Construct a Persistent from a Persistent.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S, class M2>
+ V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ /**
+ * The copy constructors and assignment operator create a Persistent
+ * exactly as the Persistent constructor, but the Copy function from the
+ * traits class is called, allowing the setting of flags based on the
+ * copied Persistent.
+ */
+ V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(nullptr) {
+ Copy(that);
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
+ Copy(that);
+ }
+ V8_INLINE Persistent& operator=(const Persistent& that) {
+ Copy(that);
+ return *this;
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) {
+ Copy(that);
+ return *this;
+ }
+ /**
+ * The destructor will dispose the Persistent based on the
+ * kResetInDestructor flags in the traits class. Since not calling dispose
+ * can result in a memory leak, it is recommended to always set this flag.
+ */
+ V8_INLINE ~Persistent() {
+ if (M::kResetInDestructor) this->Reset();
+ }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S>
+ V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (!that.IsEmpty()) T::Cast(*that);
+#endif
+ return reinterpret_cast<Persistent<T>&>(const_cast<Persistent<S>&>(that));
+ }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S>
+ V8_INLINE Persistent<S>& As() const {
+ return Persistent<S>::Cast(*this);
+ }
+
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template <class F>
+ friend class Local;
+ template <class F1, class F2>
+ friend class Persistent;
+ template <class F>
+ friend class ReturnValue;
+
+ explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
+ V8_INLINE T* operator*() const { return this->val_; }
+ template <class S, class M2>
+ V8_INLINE void Copy(const Persistent<S, M2>& that);
+};
+
+/**
+ * A PersistentBase which has move semantics.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template <class T>
+class Global : public PersistentBase<T> {
+ public:
+ /**
+ * A Global with no storage cell.
+ */
+ V8_INLINE Global() : PersistentBase<T>(nullptr) {}
+
+ /**
+ * Construct a Global from a Local.
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Global(Isolate* isolate, Local<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Construct a Global from a PersistentBase.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor.
+ */
+ V8_INLINE Global(Global&& other);
+
+ V8_INLINE ~Global() { this->Reset(); }
+
+ /**
+ * Move via assignment.
+ */
+ template <class S>
+ V8_INLINE Global& operator=(Global<S>&& rhs);
+
+ /**
+ * Pass allows returning uniques from functions, etc.
+ */
+ Global Pass() { return static_cast<Global&&>(*this); }
+
+ /*
+ * For compatibility with Chromium's base::Bind (base::Passed).
+ */
+ using MoveOnlyTypeForCPP03 = void;
+
+ Global(const Global&) = delete;
+ void operator=(const Global&) = delete;
+
+ private:
+ template <class F>
+ friend class ReturnValue;
+ V8_INLINE T* operator*() const { return this->val_; }
+};
+
+// UniquePersistent is an alias for Global for historical reason.
+template <class T>
+using UniquePersistent = Global<T>;
+
+/**
+ * Interface for iterating through all the persistent handles in the heap.
+ */
+class V8_EXPORT PersistentHandleVisitor {
+ public:
+ virtual ~PersistentHandleVisitor() = default;
+ virtual void VisitPersistentHandle(Persistent<Value>* value,
+ uint16_t class_id) {}
+};
+
+template <class T>
+T* PersistentBase<T>::New(Isolate* isolate, T* that) {
+ if (that == nullptr) return nullptr;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
+ return reinterpret_cast<T*>(api_internal::GlobalizeReference(
+ reinterpret_cast<internal::Isolate*>(isolate), p));
+}
+
+template <class T, class M>
+template <class S, class M2>
+void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ this->Reset();
+ if (that.IsEmpty()) return;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
+ this->val_ = reinterpret_cast<T*>(api_internal::CopyGlobalReference(p));
+ M::Copy(that, this);
+}
+
+template <class T>
+bool PersistentBase<T>::IsWeak() const {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return false;
+ return I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_)) ==
+ I::kNodeStateIsWeakValue;
+}
+
+template <class T>
+void PersistentBase<T>::Reset() {
+ if (this->IsEmpty()) return;
+ api_internal::DisposeGlobal(reinterpret_cast<internal::Address*>(this->val_));
+ val_ = nullptr;
+}
+
+/**
+ * If non-empty, destroy the underlying storage cell
+ * and create a new one with the contents of other if other is non-empty.
+ */
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+/**
+ * If non-empty, destroy the underlying storage cell
+ * and create a new one with the contents of other if other is non-empty.
+ */
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate,
+ const PersistentBase<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+template <class T>
+template <typename P>
+V8_INLINE void PersistentBase<T>::SetWeak(
+ P* parameter, typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type) {
+ using Callback = WeakCallbackInfo<void>::Callback;
+#if (__GNUC__ >= 8) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-function-type"
+#endif
+ api_internal::MakeWeak(reinterpret_cast<internal::Address*>(this->val_),
+ parameter, reinterpret_cast<Callback>(callback), type);
+#if (__GNUC__ >= 8) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+template <class T>
+void PersistentBase<T>::SetWeak() {
+ api_internal::MakeWeak(reinterpret_cast<internal::Address**>(&this->val_));
+}
+
+template <class T>
+template <typename P>
+P* PersistentBase<T>::ClearWeak() {
+ return reinterpret_cast<P*>(api_internal::ClearWeak(
+ reinterpret_cast<internal::Address*>(this->val_)));
+}
+
+template <class T>
+void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
+ api_internal::AnnotateStrongRetainer(
+ reinterpret_cast<internal::Address*>(this->val_), label);
+}
+
+template <class T>
+void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+template <class T>
+uint16_t PersistentBase<T>::WrapperClassId() const {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
+template <class T>
+Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
+ if (other.val_ != nullptr) {
+ api_internal::MoveGlobalReference(
+ reinterpret_cast<internal::Address**>(&other.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ other.val_ = nullptr;
+ }
+}
+
+template <class T>
+template <class S>
+Global<T>& Global<T>::operator=(Global<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ this->val_ = rhs.val_;
+ api_internal::MoveGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ rhs.val_ = nullptr;
+ }
+ }
+ return *this;
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PERSISTENT_HANDLE_H_
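
As a rough usage sketch of the handle types declared above (function and struct names here are illustrative, a live Isolate with an entered Context is assumed, and error handling is elided):

#include <utility>
#include "v8.h"  // umbrella header; the split headers above work equally well

void HoldWeakly(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Object> obj = v8::Object::New(isolate);

  // A Global keeps the object alive independently of the HandleScope.
  v8::Global<v8::Object> handle(isolate, obj);

  // Best-effort finalization: the callback may only free embedder data and
  // reset the handle; further V8 work belongs in a second-pass callback.
  struct State { int payload = 0; };
  handle.SetWeak(
      new State(),
      [](const v8::WeakCallbackInfo<State>& info) { delete info.GetParameter(); },
      v8::WeakCallbackType::kParameter);

  // While the object is still reachable, a Local can be re-created from it.
  v8::Local<v8::Object> again = handle.Get(isolate);
  (void)again;

  // Ownership moves; copying a Global is disabled.
  v8::Global<v8::Object> moved = std::move(handle);
}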
diff --git a/chromium/v8/include/v8-platform.h b/chromium/v8/include/v8-platform.h
index fc9a357feb6..e60e1757b63 100644
--- a/chromium/v8/include/v8-platform.h
+++ b/chromium/v8/include/v8-platform.h
@@ -430,11 +430,29 @@ class PageAllocator {
/**
* Frees memory in the given [address, address + size) range. address and size
* should be operating system page-aligned. The next write to this
- * memory area brings the memory transparently back.
+ * memory area brings the memory transparently back. This should be treated as
+ * a hint to the OS that the pages are no longer needed. It does not guarantee
+ * that the pages will be discarded immediately or at all.
*/
virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
/**
+ * Decommits any wired memory pages in the given range, allowing the OS to
+ * reclaim them, and marks the region as inaccessible (kNoAccess). The address
+ * range stays reserved and can be accessed again later by changing its
+ * permissions. However, in that case the memory content is guaranteed to be
+ * zero-initialized again. The memory must have been previously allocated by a
+ * call to AllocatePages. Returns true on success, false otherwise.
+ */
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // Implementing this API is required when the virtual memory cage is enabled.
+ virtual bool DecommitPages(void* address, size_t size) = 0;
+#else
+ // Otherwise, it is optional for now.
+ virtual bool DecommitPages(void* address, size_t size) { return false; }
+#endif
+
+ /**
* INTERNAL ONLY: This interface has not been stabilised and may change
* without notice from one release to another without being deprecated first.
*/
@@ -499,6 +517,18 @@ class PageAllocator {
};
/**
+ * V8 Allocator used for allocating zone backings.
+ */
+class ZoneBackingAllocator {
+ public:
+ using MallocFn = void* (*)(size_t);
+ using FreeFn = void (*)(void*);
+
+ virtual MallocFn GetMallocFn() const { return ::malloc; }
+ virtual FreeFn GetFreeFn() const { return ::free; }
+};
+
+/**
* V8 Platform abstraction layer.
*
* The embedder has to provide an implementation of this interface before
@@ -517,6 +547,14 @@ class Platform {
}
/**
+ * Allows the embedder to specify a custom allocator used for zones.
+ */
+ virtual ZoneBackingAllocator* GetZoneBackingAllocator() {
+ static ZoneBackingAllocator default_allocator;
+ return &default_allocator;
+ }
+
+ /**
* Enables the embedder to respond in cases where V8 can't allocate large
* blocks of memory. V8 retries the failed allocation once after calling this
* method. On success, execution continues; otherwise V8 exits with a fatal
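
A minimal sketch of how an embedder could hook the new ZoneBackingAllocator into its Platform implementation (the class names are illustrative; the remaining pure-virtual Platform methods still need their own overrides and are omitted):

#include <cstdlib>
#include "v8-platform.h"

class MallocZoneAllocator final : public v8::ZoneBackingAllocator {
 public:
  MallocFn GetMallocFn() const override { return ::malloc; }
  FreeFn GetFreeFn() const override { return ::free; }
};

class MyPlatform : public v8::Platform {
 public:
  v8::ZoneBackingAllocator* GetZoneBackingAllocator() override {
    static MallocZoneAllocator allocator;
    return &allocator;
  }
  // Task runners, time sources and the other pure-virtual members of
  // v8::Platform are omitted in this sketch.
};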
diff --git a/chromium/v8/include/v8-primitive-object.h b/chromium/v8/include/v8-primitive-object.h
new file mode 100644
index 00000000000..573932d0789
--- /dev/null
+++ b/chromium/v8/include/v8-primitive-object.h
@@ -0,0 +1,118 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PRIMITIVE_OBJECT_H_
+#define INCLUDE_V8_PRIMITIVE_OBJECT_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+/**
+ * A Number object (ECMA-262, 4.3.21).
+ */
+class V8_EXPORT NumberObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, double value);
+
+ double ValueOf() const;
+
+ V8_INLINE static NumberObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<NumberObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A BigInt object (https://tc39.github.io/proposal-bigint)
+ */
+class V8_EXPORT BigIntObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, int64_t value);
+
+ Local<BigInt> ValueOf() const;
+
+ V8_INLINE static BigIntObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigIntObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A Boolean object (ECMA-262, 4.3.15).
+ */
+class V8_EXPORT BooleanObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, bool value);
+
+ bool ValueOf() const;
+
+ V8_INLINE static BooleanObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BooleanObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A String object (ECMA-262, 4.3.18).
+ */
+class V8_EXPORT StringObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, Local<String> value);
+
+ Local<String> ValueOf() const;
+
+ V8_INLINE static StringObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<StringObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A Symbol object (ECMA-262 edition 6).
+ */
+class V8_EXPORT SymbolObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, Local<Symbol> value);
+
+ Local<Symbol> ValueOf() const;
+
+ V8_INLINE static SymbolObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<SymbolObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PRIMITIVE_OBJECT_H_
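
For orientation, a short sketch of wrapping primitives in their object counterparts (illustrative function name; assumes a live Isolate with an entered Context):

#include "v8.h"

void WrapPrimitives(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);

  v8::Local<v8::Value> num = v8::NumberObject::New(isolate, 42.0);
  double unwrapped = v8::NumberObject::Cast(*num)->ValueOf();  // 42.0

  v8::Local<v8::Value> str = v8::StringObject::New(
      isolate, v8::String::NewFromUtf8Literal(isolate, "hi"));
  v8::Local<v8::String> inner = v8::StringObject::Cast(*str)->ValueOf();

  (void)unwrapped;
  (void)inner;
}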
diff --git a/chromium/v8/include/v8-primitive.h b/chromium/v8/include/v8-primitive.h
new file mode 100644
index 00000000000..59d959da057
--- /dev/null
+++ b/chromium/v8/include/v8-primitive.h
@@ -0,0 +1,858 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PRIMITIVE_H_
+#define INCLUDE_V8_PRIMITIVE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+class String;
+
+namespace internal {
+class ExternalString;
+class ScopedExternalStringLock;
+} // namespace internal
+
+/**
+ * The superclass of primitive values. See ECMA-262 4.3.2.
+ */
+class V8_EXPORT Primitive : public Value {};
+
+/**
+ * A primitive boolean value (ECMA-262, 4.3.14). Either the true
+ * or false value.
+ */
+class V8_EXPORT Boolean : public Primitive {
+ public:
+ bool Value() const;
+ V8_INLINE static Boolean* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Boolean*>(data);
+ }
+
+ V8_INLINE static Local<Boolean> New(Isolate* isolate, bool value);
+
+ private:
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * An array to hold Primitive values. This is used by the embedder to
+ * pass host-defined options to the ScriptOptions during compilation.
+ *
+ * This is passed back to the embedder as part of
+ * HostImportModuleDynamicallyCallback for module loading.
+ */
+class V8_EXPORT PrimitiveArray {
+ public:
+ static Local<PrimitiveArray> New(Isolate* isolate, int length);
+ int Length() const;
+ void Set(Isolate* isolate, int index, Local<Primitive> item);
+ Local<Primitive> Get(Isolate* isolate, int index);
+};
+
+/**
+ * A superclass for symbols and strings.
+ */
+class V8_EXPORT Name : public Primitive {
+ public:
+ /**
+ * Returns the identity hash for this object. The current implementation
+ * uses an inline property on the object to store the identity hash.
+ *
+ * The return value will never be 0. Also, it is not guaranteed to be
+ * unique.
+ */
+ int GetIdentityHash();
+
+ V8_INLINE static Name* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Name*>(data);
+ }
+
+ private:
+ static void CheckCast(Data* that);
+};
+
+/**
+ * A flag describing different modes of string creation.
+ *
+ * Aside from performance implications there are no differences between the two
+ * creation modes.
+ */
+enum class NewStringType {
+ /**
+ * Create a new string, always allocating new storage memory.
+ */
+ kNormal,
+
+ /**
+ * Acts as a hint that the string should be created in the
+ * old generation heap space and be deduplicated if an identical string
+ * already exists.
+ */
+ kInternalized
+};
+
+/**
+ * A JavaScript string value (ECMA-262, 4.3.17).
+ */
+class V8_EXPORT String : public Name {
+ public:
+ static constexpr int kMaxLength =
+ internal::kApiSystemPointerSize == 4 ? (1 << 28) - 16 : (1 << 29) - 24;
+
+ enum Encoding {
+ UNKNOWN_ENCODING = 0x1,
+ TWO_BYTE_ENCODING = 0x0,
+ ONE_BYTE_ENCODING = 0x8
+ };
+ /**
+ * Returns the number of characters (UTF-16 code units) in this string.
+ */
+ int Length() const;
+
+ /**
+ * Returns the number of bytes in the UTF-8 encoded
+ * representation of this string.
+ */
+ int Utf8Length(Isolate* isolate) const;
+
+ /**
+   * Returns whether this string is known to contain only one-byte data,
+ * i.e. ISO-8859-1 code points.
+ * Does not read the string.
+ * False negatives are possible.
+ */
+ bool IsOneByte() const;
+
+ /**
+   * Returns whether this string contains only one-byte data,
+ * i.e. ISO-8859-1 code points.
+ * Will read the entire string in some cases.
+ */
+ bool ContainsOnlyOneByte() const;
+
+ /**
+ * Write the contents of the string to an external buffer.
+ * If no arguments are given, expects the buffer to be large
+ * enough to hold the entire string and NULL terminator. Copies
+ * the contents of the string and the NULL terminator into the
+ * buffer.
+ *
+ * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
+ * before the end of the buffer.
+ *
+ * Copies up to length characters into the output buffer.
+ * Only null-terminates if there is enough space in the buffer.
+ *
+ * \param buffer The buffer into which the string will be copied.
+ * \param start The starting position within the string at which
+ * copying begins.
+ * \param length The number of characters to copy from the string. For
+ * WriteUtf8 the number of bytes in the buffer.
+ * \param nchars_ref The number of characters written, can be NULL.
+ * \param options Various options that might affect performance of this or
+ * subsequent operations.
+ * \return The number of characters copied to the buffer excluding the null
+ * terminator. For WriteUtf8: The number of bytes copied to the buffer
+ * including the null terminator (if written).
+ */
+ enum WriteOptions {
+ NO_OPTIONS = 0,
+ HINT_MANY_WRITES_EXPECTED = 1,
+ NO_NULL_TERMINATION = 2,
+ PRESERVE_ONE_BYTE_NULL = 4,
+ // Used by WriteUtf8 to replace orphan surrogate code units with the
+ // unicode replacement character. Needs to be set to guarantee valid UTF-8
+ // output.
+ REPLACE_INVALID_UTF8 = 8
+ };
+
+ // 16-bit character codes.
+ int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
+ int options = NO_OPTIONS) const;
+ // One byte characters.
+ int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
+ int length = -1, int options = NO_OPTIONS) const;
+ // UTF-8 encoded characters.
+ int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
+ int* nchars_ref = nullptr, int options = NO_OPTIONS) const;
+
+ /**
+ * A zero length string.
+ */
+ V8_INLINE static Local<String> Empty(Isolate* isolate);
+
+ /**
+ * Returns true if the string is external.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Returns true if the string is both external and two-byte.
+ */
+ bool IsExternalTwoByte() const;
+
+ /**
+ * Returns true if the string is both external and one-byte.
+ */
+ bool IsExternalOneByte() const;
+
+ class V8_EXPORT ExternalStringResourceBase {
+ public:
+ virtual ~ExternalStringResourceBase() = default;
+
+ /**
+ * If a string is cacheable, the value returned by
+ * ExternalStringResource::data() may be cached, otherwise it is not
+ * expected to be stable beyond the current top-level task.
+ */
+ virtual bool IsCacheable() const { return true; }
+
+ // Disallow copying and assigning.
+ ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
+ void operator=(const ExternalStringResourceBase&) = delete;
+
+ protected:
+ ExternalStringResourceBase() = default;
+
+ /**
+ * Internally V8 will call this Dispose method when the external string
+ * resource is no longer needed. The default implementation will use the
+ * delete operator. This method can be overridden in subclasses to
+ * control how allocated external string resources are disposed.
+ */
+ virtual void Dispose() { delete this; }
+
+ /**
+ * For a non-cacheable string, the value returned by
+ * |ExternalStringResource::data()| has to be stable between |Lock()| and
+     * |Unlock()|, that is, the string must behave as if |IsCacheable()| returned
+ * true.
+ *
+ * These two functions must be thread-safe, and can be called from anywhere.
+ * They also must handle lock depth, in the sense that each can be called
+ * several times, from different threads, and unlocking should only happen
+ * when the balance of Lock() and Unlock() calls is 0.
+ */
+ virtual void Lock() const {}
+
+ /**
+ * Unlocks the string.
+ */
+ virtual void Unlock() const {}
+
+ private:
+ friend class internal::ExternalString;
+ friend class v8::String;
+ friend class internal::ScopedExternalStringLock;
+ };
+
+ /**
+ * An ExternalStringResource is a wrapper around a two-byte string
+ * buffer that resides outside V8's heap. Implement an
+ * ExternalStringResource to manage the life cycle of the underlying
+ * buffer. Note that the string data must be immutable.
+ */
+ class V8_EXPORT ExternalStringResource : public ExternalStringResourceBase {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ ~ExternalStringResource() override = default;
+
+ /**
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
+ */
+ virtual const uint16_t* data() const = 0;
+
+ /**
+ * The length of the string. That is, the number of two-byte characters.
+ */
+ virtual size_t length() const = 0;
+
+ /**
+ * Returns the cached data from the underlying buffer. This method can be
+ * called only for cacheable resources (i.e. IsCacheable() == true) and only
+ * after UpdateDataCache() was called.
+ */
+ const uint16_t* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
+ protected:
+ ExternalStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const uint16_t* cached_data_ = nullptr;
+ };
+
+ /**
+   * An ExternalOneByteStringResource is a wrapper around a one-byte
+ * string buffer that resides outside V8's heap. Implement an
+ * ExternalOneByteStringResource to manage the life cycle of the
+ * underlying buffer. Note that the string data must be immutable
+ * and that the data must be Latin-1 and not UTF-8, which would require
+   * special treatment internally in the engine and does not allow efficient
+   * indexing. Use String::New or convert to 16-bit data for non-Latin-1.
+ */
+
+ class V8_EXPORT ExternalOneByteStringResource
+ : public ExternalStringResourceBase {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ ~ExternalOneByteStringResource() override = default;
+
+ /**
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
+ */
+ virtual const char* data() const = 0;
+
+ /** The number of Latin-1 characters in the string.*/
+ virtual size_t length() const = 0;
+
+ /**
+ * Returns the cached data from the underlying buffer. If the resource is
+ * uncacheable or if UpdateDataCache() was not called before, it has
+ * undefined behaviour.
+ */
+ const char* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
+ protected:
+ ExternalOneByteStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const char* cached_data_ = nullptr;
+ };
+
+ /**
+ * If the string is an external string, return the ExternalStringResourceBase
+ * regardless of the encoding, otherwise return NULL. The encoding of the
+ * string is returned in encoding_out.
+ */
+ V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const;
+
+ /**
+ * Get the ExternalStringResource for an external string. Returns
+ * NULL if IsExternal() doesn't return true.
+ */
+ V8_INLINE ExternalStringResource* GetExternalStringResource() const;
+
+ /**
+ * Get the ExternalOneByteStringResource for an external one-byte string.
+ * Returns NULL if IsExternalOneByte() doesn't return true.
+ */
+ const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
+
+ V8_INLINE static String* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<String*>(data);
+ }
+
+ /**
+ * Allocates a new string from a UTF-8 literal. This is equivalent to calling
+   * String::NewFromUtf8(isolate, "...").ToLocalChecked(), but without the check
+ * overhead.
+ *
+ * When called on a string literal containing '\0', the inferred length is the
+ * length of the input array minus 1 (for the final '\0') and not the value
+ * returned by strlen.
+ **/
+ template <int N>
+ static V8_WARN_UNUSED_RESULT Local<String> NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[N],
+ NewStringType type = NewStringType::kNormal) {
+ static_assert(N <= kMaxLength, "String is too long");
+ return NewFromUtf8Literal(isolate, literal, type, N - 1);
+ }
+
+ /** Allocates a new string from UTF-8 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
+ Isolate* isolate, const char* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /** Allocates a new string from Latin-1 data. Only returns an empty value
+ * when length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
+ Isolate* isolate, const uint8_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /** Allocates a new string from UTF-16 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromTwoByte(
+ Isolate* isolate, const uint16_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /**
+ * Creates a new string by concatenating the left and the right strings
+ * passed in as parameters.
+ */
+ static Local<String> Concat(Isolate* isolate, Local<String> left,
+ Local<String> right);
+
+ /**
+ * Creates a new external string using the data defined in the given
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
+ Isolate* isolate, ExternalStringResource* resource);
+
+ /**
+ * Associate an external string resource with this string by transforming it
+ * in place so that existing references to this string in the JavaScript heap
+ * will use the external string resource. The external string resource's
+ * character contents need to be equivalent to this string.
+ * Returns true if the string has been changed to be an external string.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
+ */
+ bool MakeExternal(ExternalStringResource* resource);
+
+ /**
+ * Creates a new external string using the one-byte data defined in the given
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalOneByte(
+ Isolate* isolate, ExternalOneByteStringResource* resource);
+
+ /**
+ * Associate an external string resource with this string by transforming it
+ * in place so that existing references to this string in the JavaScript heap
+ * will use the external string resource. The external string resource's
+ * character contents need to be equivalent to this string.
+ * Returns true if the string has been changed to be an external string.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
+ */
+ bool MakeExternal(ExternalOneByteStringResource* resource);
+
+ /**
+ * Returns true if this string can be made external.
+ */
+ bool CanMakeExternal() const;
+
+ /**
+   * Returns true if the string values are equal. Same as JS ==/===.
+ */
+ bool StringEquals(Local<String> str) const;
+
+ /**
+ * Converts an object to a UTF-8-encoded character array. Useful if
+ * you want to print the object. If conversion to a string fails
+ * (e.g. due to an exception in the toString() method of the object)
+ * then the length() method returns 0 and the * operator returns
+ * NULL.
+ */
+ class V8_EXPORT Utf8Value {
+ public:
+ Utf8Value(Isolate* isolate, Local<v8::Value> obj);
+ ~Utf8Value();
+ char* operator*() { return str_; }
+ const char* operator*() const { return str_; }
+ int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Utf8Value(const Utf8Value&) = delete;
+ void operator=(const Utf8Value&) = delete;
+
+ private:
+ char* str_;
+ int length_;
+ };
+
+ /**
+ * Converts an object to a two-byte (UTF-16-encoded) string.
+   * If conversion to a string fails (e.g. due to an exception in the toString()
+ * method of the object) then the length() method returns 0 and the * operator
+ * returns NULL.
+ */
+ class V8_EXPORT Value {
+ public:
+ Value(Isolate* isolate, Local<v8::Value> obj);
+ ~Value();
+ uint16_t* operator*() { return str_; }
+ const uint16_t* operator*() const { return str_; }
+ int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Value(const Value&) = delete;
+ void operator=(const Value&) = delete;
+
+ private:
+ uint16_t* str_;
+ int length_;
+ };
+
+ private:
+ void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
+ Encoding encoding) const;
+ void VerifyExternalStringResource(ExternalStringResource* val) const;
+ ExternalStringResource* GetExternalStringResourceSlow() const;
+ ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
+ String::Encoding* encoding_out) const;
+
+ static Local<v8::String> NewFromUtf8Literal(Isolate* isolate,
+ const char* literal,
+ NewStringType type, int length);
+
+ static void CheckCast(v8::Data* that);
+};
+
+// Zero-length string specialization (templated string size includes
+// terminator).
+template <>
+inline V8_WARN_UNUSED_RESULT Local<String> String::NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[1], NewStringType type) {
+ return String::Empty(isolate);
+}
+
+/**
+ * Interface for iterating through all external resources in the heap.
+ */
+class V8_EXPORT ExternalResourceVisitor {
+ public:
+ virtual ~ExternalResourceVisitor() = default;
+ virtual void VisitExternalString(Local<String> string) {}
+};
+
+/**
+ * A JavaScript symbol (ECMA-262 edition 6)
+ */
+class V8_EXPORT Symbol : public Name {
+ public:
+ /**
+ * Returns the description string of the symbol, or undefined if none.
+ */
+ V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
+ Local<Value> Description() const;
+ Local<Value> Description(Isolate* isolate) const;
+
+ /**
+ * Create a symbol. If description is not empty, it will be used as the
+ * description.
+ */
+ static Local<Symbol> New(Isolate* isolate,
+ Local<String> description = Local<String>());
+
+ /**
+ * Access global symbol registry.
+ * Note that symbols created this way are never collected, so
+ * they should only be used for statically fixed properties.
+ * Also, there is only one global name space for the descriptions used as
+ * keys.
+ * To minimize the potential for clashes, use qualified names as keys.
+ */
+ static Local<Symbol> For(Isolate* isolate, Local<String> description);
+
+ /**
+ * Retrieve a global symbol. Similar to |For|, but using a separate
+ * registry that is not accessible by (and cannot clash with) JavaScript code.
+ */
+ static Local<Symbol> ForApi(Isolate* isolate, Local<String> description);
+
+ // Well-known symbols
+ static Local<Symbol> GetAsyncIterator(Isolate* isolate);
+ static Local<Symbol> GetHasInstance(Isolate* isolate);
+ static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
+ static Local<Symbol> GetIterator(Isolate* isolate);
+ static Local<Symbol> GetMatch(Isolate* isolate);
+ static Local<Symbol> GetReplace(Isolate* isolate);
+ static Local<Symbol> GetSearch(Isolate* isolate);
+ static Local<Symbol> GetSplit(Isolate* isolate);
+ static Local<Symbol> GetToPrimitive(Isolate* isolate);
+ static Local<Symbol> GetToStringTag(Isolate* isolate);
+ static Local<Symbol> GetUnscopables(Isolate* isolate);
+
+ V8_INLINE static Symbol* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Symbol*>(data);
+ }
+
+ private:
+ Symbol();
+ static void CheckCast(Data* that);
+};
+
+/**
+ * A JavaScript number value (ECMA-262, 4.3.20)
+ */
+class V8_EXPORT Number : public Primitive {
+ public:
+ double Value() const;
+ static Local<Number> New(Isolate* isolate, double value);
+ V8_INLINE static Number* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Number*>(data);
+ }
+
+ private:
+ Number();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a signed integer.
+ */
+class V8_EXPORT Integer : public Number {
+ public:
+ static Local<Integer> New(Isolate* isolate, int32_t value);
+ static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
+ int64_t Value() const;
+ V8_INLINE static Integer* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Integer*>(data);
+ }
+
+ private:
+ Integer();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a 32-bit signed integer.
+ */
+class V8_EXPORT Int32 : public Integer {
+ public:
+ int32_t Value() const;
+ V8_INLINE static Int32* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Int32*>(data);
+ }
+
+ private:
+ Int32();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a 32-bit unsigned integer.
+ */
+class V8_EXPORT Uint32 : public Integer {
+ public:
+ uint32_t Value() const;
+ V8_INLINE static Uint32* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Uint32*>(data);
+ }
+
+ private:
+ Uint32();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript BigInt value (https://tc39.github.io/proposal-bigint)
+ */
+class V8_EXPORT BigInt : public Primitive {
+ public:
+ static Local<BigInt> New(Isolate* isolate, int64_t value);
+ static Local<BigInt> NewFromUnsigned(Isolate* isolate, uint64_t value);
+ /**
+ * Creates a new BigInt object using a specified sign bit and a
+ * specified list of digits/words.
+ * The resulting number is calculated as:
+ *
+ * (-1)^sign_bit * (words[0] * (2^64)^0 + words[1] * (2^64)^1 + ...)
+ */
+ static MaybeLocal<BigInt> NewFromWords(Local<Context> context, int sign_bit,
+ int word_count, const uint64_t* words);
+
+ /**
+ * Returns the value of this BigInt as an unsigned 64-bit integer.
+ * If `lossless` is provided, it will reflect whether the return value was
+ * truncated or wrapped around. In particular, it is set to `false` if this
+ * BigInt is negative.
+ */
+ uint64_t Uint64Value(bool* lossless = nullptr) const;
+
+ /**
+ * Returns the value of this BigInt as a signed 64-bit integer.
+ * If `lossless` is provided, it will reflect whether this BigInt was
+ * truncated or not.
+ */
+ int64_t Int64Value(bool* lossless = nullptr) const;
+
+ /**
+ * Returns the number of 64-bit words needed to store the result of
+ * ToWordsArray().
+ */
+ int WordCount() const;
+
+ /**
+ * Writes the contents of this BigInt to a specified memory location.
+ * `sign_bit` must be provided and will be set to 1 if this BigInt is
+ * negative.
+ * `*word_count` has to be initialized to the length of the `words` array.
+ * Upon return, it will be set to the actual number of words that would
+ * be needed to store this BigInt (i.e. the return value of `WordCount()`).
+ */
+ void ToWordsArray(int* sign_bit, int* word_count, uint64_t* words) const;
+
+ V8_INLINE static BigInt* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<BigInt*>(data);
+ }
+
+ private:
+ BigInt();
+ static void CheckCast(v8::Data* that);
+};
+
+Local<String> String::Empty(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
+ return Local<String>(reinterpret_cast<String*>(slot));
+}
+
+String::ExternalStringResource* String::GetExternalStringResource() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+
+ ExternalStringResource* result;
+ if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
+ result = reinterpret_cast<String::ExternalStringResource*>(value);
+ } else {
+ result = GetExternalStringResourceSlow();
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResource(result);
+#endif
+ return result;
+}
+
+String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
+ String::Encoding* encoding_out) const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
+ *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
+ ExternalStringResourceBase* resource;
+ if (type == I::kExternalOneByteRepresentationTag ||
+ type == I::kExternalTwoByteRepresentationTag) {
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
+ resource = reinterpret_cast<ExternalStringResourceBase*>(value);
+ } else {
+ resource = GetExternalStringResourceBaseSlow(encoding_out);
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResourceBase(resource, *encoding_out);
+#endif
+ return resource;
+}
+
+// --- Statics ---
+
+V8_INLINE Local<Primitive> Undefined(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+ return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+V8_INLINE Local<Primitive> Null(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
+ return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+V8_INLINE Local<Boolean> True(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
+ return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+V8_INLINE Local<Boolean> False(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
+ return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
+ return value ? True(isolate) : False(isolate);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PRIMITIVE_H_
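
A small sketch exercising the string and primitive factories above (illustrative function name; assumes a live Isolate with an entered Context, with error handling collapsed into ToLocalChecked()):

#include <cstdio>
#include "v8.h"

void StringsAndPrimitives(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);

  // Literal creation skips the MaybeLocal check; runtime data goes through
  // the MaybeLocal-returning factory.
  v8::Local<v8::String> hello = v8::String::NewFromUtf8Literal(isolate, "hello ");
  v8::Local<v8::String> world =
      v8::String::NewFromUtf8(isolate, "world").ToLocalChecked();
  v8::Local<v8::String> joined = v8::String::Concat(isolate, hello, world);

  // Copy out as UTF-8.
  v8::String::Utf8Value utf8(isolate, joined);
  std::printf("%s (%d UTF-16 units)\n", *utf8, joined->Length());

  // Registered symbols and the canonical singletons.
  v8::Local<v8::Symbol> sym = v8::Symbol::For(isolate, hello);
  v8::Local<v8::Primitive> undef = v8::Undefined(isolate);
  v8::Local<v8::Boolean> yes = v8::Boolean::New(isolate, true);
  (void)sym;
  (void)undef;
  (void)yes;
}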
diff --git a/chromium/v8/include/v8-profiler.h b/chromium/v8/include/v8-profiler.h
index 9a40cfcf307..f2354cac38e 100644
--- a/chromium/v8/include/v8-profiler.h
+++ b/chromium/v8/include/v8-profiler.h
@@ -11,7 +11,9 @@
#include <unordered_set>
#include <vector>
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
/**
* Profiler support for the V8 JavaScript engine.
@@ -20,6 +22,7 @@ namespace v8 {
class HeapGraphNode;
struct HeapStatsUpdate;
+class Object;
using NativeObject = void*;
using SnapshotObjectId = uint32_t;
diff --git a/chromium/v8/include/v8-promise.h b/chromium/v8/include/v8-promise.h
new file mode 100644
index 00000000000..9da8e4b4e86
--- /dev/null
+++ b/chromium/v8/include/v8-promise.h
@@ -0,0 +1,174 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PROMISE_H_
+#define INCLUDE_V8_PROMISE_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+#ifndef V8_PROMISE_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by the embedder.
+#define V8_PROMISE_INTERNAL_FIELD_COUNT 0
+#endif
+
+/**
+ * An instance of the built-in Promise constructor (ES6 draft).
+ */
+class V8_EXPORT Promise : public Object {
+ public:
+ /**
+ * State of the promise. Each value corresponds to one of the possible values
+ * of the [[PromiseState]] field.
+ */
+ enum PromiseState { kPending, kFulfilled, kRejected };
+
+ class V8_EXPORT Resolver : public Object {
+ public:
+ /**
+ * Create a new resolver, along with an associated promise in pending state.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
+ Local<Context> context);
+
+ /**
+ * Extract the associated promise.
+ */
+ Local<Promise> GetPromise();
+
+ /**
+ * Resolve/reject the associated promise with a given value.
+ * Ignored if the promise is no longer pending.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
+ Local<Value> value);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
+ Local<Value> value);
+
+ V8_INLINE static Resolver* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise::Resolver*>(value);
+ }
+
+ private:
+ Resolver();
+ static void CheckCast(Value* obj);
+ };
+
+ /**
+ * Register a resolution/rejection handler with a promise.
+ * The handler is given the respective resolution/rejection value as
+ * an argument. If the promise is already resolved/rejected, the handler is
+   * invoked at the end of the turn.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
+ Local<Function> handler);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
+ Local<Function> handler);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
+ Local<Function> on_fulfilled,
+ Local<Function> on_rejected);
+
+ /**
+ * Returns true if the promise has at least one derived promise, and
+ * therefore resolve/reject handlers (including default handler).
+ */
+ bool HasHandler() const;
+
+ /**
+ * Returns the content of the [[PromiseResult]] field. The Promise must not
+ * be pending.
+ */
+ Local<Value> Result();
+
+ /**
+ * Returns the value of the [[PromiseState]] field.
+ */
+ PromiseState State();
+
+ /**
+ * Marks this promise as handled to avoid reporting unhandled rejections.
+ */
+ void MarkAsHandled();
+
+ /**
+ * Marks this promise as silent to prevent pausing the debugger when the
+ * promise is rejected.
+ */
+ void MarkAsSilent();
+
+ V8_INLINE static Promise* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise*>(value);
+ }
+
+ static const int kEmbedderFieldCount = V8_PROMISE_INTERNAL_FIELD_COUNT;
+
+ private:
+ Promise();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * PromiseHook with type kInit is called when a new promise is
+ * created. When a new promise is created as part of the chain in the
+ * case of Promise.then or in the intermediate promises created by
+ * Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
+ * otherwise we pass undefined.
+ *
+ * PromiseHook with type kResolve is called at the beginning of
+ * resolve or reject function defined by CreateResolvingFunctions.
+ *
+ * PromiseHook with type kBefore is called at the beginning of the
+ * PromiseReactionJob.
+ *
+ * PromiseHook with type kAfter is called right at the end of the
+ * PromiseReactionJob.
+ */
+enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };
+
+using PromiseHook = void (*)(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent);
+
+// --- Promise Reject Callback ---
+enum PromiseRejectEvent {
+ kPromiseRejectWithNoHandler = 0,
+ kPromiseHandlerAddedAfterReject = 1,
+ kPromiseRejectAfterResolved = 2,
+ kPromiseResolveAfterResolved = 3,
+};
+
+class PromiseRejectMessage {
+ public:
+ PromiseRejectMessage(Local<Promise> promise, PromiseRejectEvent event,
+ Local<Value> value)
+ : promise_(promise), event_(event), value_(value) {}
+
+ V8_INLINE Local<Promise> GetPromise() const { return promise_; }
+ V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
+ V8_INLINE Local<Value> GetValue() const { return value_; }
+
+ private:
+ Local<Promise> promise_;
+ PromiseRejectEvent event_;
+ Local<Value> value_;
+};
+
+using PromiseRejectCallback = void (*)(PromiseRejectMessage message);
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PROMISE_H_
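
A sketch of driving a promise from the embedder side with the Resolver API above (illustrative function name; a live Context is assumed and error handling is collapsed into ToLocalChecked()/Check()):

#include "v8.h"

v8::Local<v8::Promise> MakeResolvedPromise(v8::Local<v8::Context> context,
                                           v8::Local<v8::Value> value) {
  v8::Local<v8::Promise::Resolver> resolver =
      v8::Promise::Resolver::New(context).ToLocalChecked();
  resolver->Resolve(context, value).Check();

  v8::Local<v8::Promise> promise = resolver->GetPromise();
  // promise->State() is now v8::Promise::kFulfilled.
  return promise;
}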
diff --git a/chromium/v8/include/v8-proxy.h b/chromium/v8/include/v8-proxy.h
new file mode 100644
index 00000000000..a08db8805c6
--- /dev/null
+++ b/chromium/v8/include/v8-proxy.h
@@ -0,0 +1,50 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PROXY_H_
+#define INCLUDE_V8_PROXY_H_
+
+#include "v8-context.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
+ * 26.2.1).
+ */
+class V8_EXPORT Proxy : public Object {
+ public:
+ Local<Value> GetTarget();
+ Local<Value> GetHandler();
+ bool IsRevoked() const;
+ void Revoke();
+
+ /**
+ * Creates a new Proxy for the target object.
+ */
+ static MaybeLocal<Proxy> New(Local<Context> context,
+ Local<Object> local_target,
+ Local<Object> local_handler);
+
+ V8_INLINE static Proxy* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Proxy*>(value);
+ }
+
+ private:
+ Proxy();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PROXY_H_
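
A sketch of creating and revoking a Proxy with the API above (illustrative function name; a live Context is assumed and error handling is elided):

#include "v8.h"

void MakeAndRevokeProxy(v8::Local<v8::Context> context) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::HandleScope scope(isolate);

  v8::Local<v8::Object> target = v8::Object::New(isolate);
  v8::Local<v8::Object> handler = v8::Object::New(isolate);

  v8::Local<v8::Proxy> proxy =
      v8::Proxy::New(context, target, handler).ToLocalChecked();

  // GetTarget()/GetHandler() return the originals until Revoke() is called.
  proxy->Revoke();
  bool revoked = proxy->IsRevoked();  // true
  (void)revoked;
}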
diff --git a/chromium/v8/include/v8-regexp.h b/chromium/v8/include/v8-regexp.h
new file mode 100644
index 00000000000..3791bc03687
--- /dev/null
+++ b/chromium/v8/include/v8-regexp.h
@@ -0,0 +1,105 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_REGEXP_H_
+#define INCLUDE_V8_REGEXP_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
+ */
+class V8_EXPORT RegExp : public Object {
+ public:
+ /**
+ * Regular expression flag bits. They can be or'ed to enable a set
+ * of flags.
+ * The kLinear value ('l') is experimental and can only be used with
+ * --enable-experimental-regexp-engine. RegExps with kLinear flag are
+ * guaranteed to be executed in asymptotic linear time wrt. the length of
+ * the subject string.
+ */
+ enum Flags {
+ kNone = 0,
+ kGlobal = 1 << 0,
+ kIgnoreCase = 1 << 1,
+ kMultiline = 1 << 2,
+ kSticky = 1 << 3,
+ kUnicode = 1 << 4,
+ kDotAll = 1 << 5,
+ kLinear = 1 << 6,
+ kHasIndices = 1 << 7,
+ };
+
+ static constexpr int kFlagCount = 8;
+
+ /**
+ * Creates a regular expression from the given pattern string and
+ * the flags bit field. May throw a JavaScript exception as
+ * described in ECMA-262, 15.10.4.1.
+ *
+ * For example,
+ * RegExp::New(v8::String::New("foo"),
+ * static_cast<RegExp::Flags>(kGlobal | kMultiline))
+ * is equivalent to evaluating "/foo/gm".
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
+ Local<String> pattern,
+ Flags flags);
+
+ /**
+ * Like New, but additionally specifies a backtrack limit. If the number of
+ * backtracks done in one Exec call hits the limit, a match failure is
+ * immediately returned.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> NewWithBacktrackLimit(
+ Local<Context> context, Local<String> pattern, Flags flags,
+ uint32_t backtrack_limit);
+
+ /**
+ * Executes the current RegExp instance on the given subject string.
+ * Equivalent to RegExp.prototype.exec as described in
+ *
+ * https://tc39.es/ecma262/#sec-regexp.prototype.exec
+ *
+ * On success, an Array containing the matched strings is returned. On
+ * failure, returns Null.
+ *
+ * Note: modifies global context state, accessible e.g. through RegExp.input.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> Exec(Local<Context> context,
+ Local<String> subject);
+
+ /**
+ * Returns the value of the source property: a string representing
+ * the regular expression.
+ */
+ Local<String> GetSource() const;
+
+ /**
+ * Returns the flags bit field.
+ */
+ Flags GetFlags() const;
+
+ V8_INLINE static RegExp* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<RegExp*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_REGEXP_H_
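
A sketch of compiling and running a RegExp through the API above (illustrative function name; a live Context is assumed and error handling is collapsed into ToLocalChecked()):

#include "v8.h"

void RunRegExp(v8::Local<v8::Context> context) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::HandleScope scope(isolate);

  v8::Local<v8::RegExp> re =
      v8::RegExp::New(context, v8::String::NewFromUtf8Literal(isolate, "fo+"),
                      static_cast<v8::RegExp::Flags>(v8::RegExp::kGlobal |
                                                     v8::RegExp::kIgnoreCase))
          .ToLocalChecked();

  v8::Local<v8::Object> result =
      re->Exec(context, v8::String::NewFromUtf8Literal(isolate, "FOO bar"))
          .ToLocalChecked();
  // On a failed match Exec() yields Null rather than an Array.
  bool matched = !result->IsNull();
  (void)matched;
}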
diff --git a/chromium/v8/include/v8-script.h b/chromium/v8/include/v8-script.h
new file mode 100644
index 00000000000..d17089932cc
--- /dev/null
+++ b/chromium/v8/include/v8-script.h
@@ -0,0 +1,771 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_SCRIPT_H_
+#define INCLUDE_V8_SCRIPT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+class Object;
+class PrimitiveArray;
+class Script;
+
+namespace internal {
+class BackgroundDeserializeTask;
+struct ScriptStreamingData;
+} // namespace internal
+
+/**
+ * A container type that holds relevant metadata for module loading.
+ *
+ * This is passed back to the embedder as part of
+ * HostImportModuleDynamicallyCallback for module loading.
+ */
+class V8_EXPORT ScriptOrModule {
+ public:
+ /**
+ * The name that was passed by the embedder as ResourceName to the
+ * ScriptOrigin. This can be either a v8::String or v8::Undefined.
+ */
+ Local<Value> GetResourceName();
+
+ /**
+ * The options that were passed by the embedder as HostDefinedOptions to
+ * the ScriptOrigin.
+ */
+ Local<PrimitiveArray> GetHostDefinedOptions();
+};
+
+/**
+ * A compiled JavaScript script, not yet tied to a Context.
+ */
+class V8_EXPORT UnboundScript {
+ public:
+ /**
+ * Binds the script to the currently entered context.
+ */
+ Local<Script> BindToCurrentContext();
+
+ int GetId() const;
+ Local<Value> GetScriptName();
+
+ /**
+ * Data read from magic sourceURL comments.
+ */
+ Local<Value> GetSourceURL();
+ /**
+ * Data read from magic sourceMappingURL comments.
+ */
+ Local<Value> GetSourceMappingURL();
+
+ /**
+ * Returns the zero-based line number of the code_pos location in the script.
+ * -1 will be returned if no information is available.
+ */
+ int GetLineNumber(int code_pos);
+
+ static const int kNoScriptId = 0;
+};
+
+/**
+ * A compiled JavaScript module, not yet tied to a Context.
+ */
+class V8_EXPORT UnboundModuleScript : public Data {
+ // Only used as a container for code caching.
+};
+
+/**
+ * A location in JavaScript source.
+ */
+class V8_EXPORT Location {
+ public:
+ int GetLineNumber() { return line_number_; }
+ int GetColumnNumber() { return column_number_; }
+
+ Location(int line_number, int column_number)
+ : line_number_(line_number), column_number_(column_number) {}
+
+ private:
+ int line_number_;
+ int column_number_;
+};
+
+class V8_EXPORT ModuleRequest : public Data {
+ public:
+ /**
+ * Returns the module specifier for this ModuleRequest.
+ */
+ Local<String> GetSpecifier() const;
+
+ /**
+ * Returns the source code offset of this module request.
+ * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
+ */
+ int GetSourceOffset() const;
+
+ /**
+ * Contains the import assertions for this request in the form:
+ * [key1, value1, source_offset1, key2, value2, source_offset2, ...].
+ * The keys and values are of type v8::String, and the source offsets are of
+ * type Int32. Use Module::SourceOffsetToLocation to convert the source
+ * offsets to Locations with line/column numbers.
+ *
+ * All assertions present in the module request will be supplied in this
+ * list, regardless of whether they are supported by the host. Per
+ * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions,
+ * hosts are expected to ignore assertions that they do not support (as
+ * opposed to, for example, triggering an error if an unsupported assertion is
+ * present).
+ */
+ Local<FixedArray> GetImportAssertions() const;
+
+ V8_INLINE static ModuleRequest* Cast(Data* data);
+
+ private:
+ static void CheckCast(Data* obj);
+};
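+
+// A sketch of walking the [key, value, source_offset] triples returned by
+// GetImportAssertions(), assuming `context` is an entered Local<Context>,
+// `module` is the importing Local<Module>, and `request` is a
+// Local<ModuleRequest> (names are illustrative):
+//
+//   v8::Local<v8::FixedArray> assertions = request->GetImportAssertions();
+//   for (int i = 0; i < assertions->Length(); i += 3) {
+//     v8::Local<v8::String> key =
+//         assertions->Get(context, i).As<v8::String>();
+//     v8::Local<v8::String> value =
+//         assertions->Get(context, i + 1).As<v8::String>();
+//     int source_offset =
+//         assertions->Get(context, i + 2).As<v8::Int32>()->Value();
+//     v8::Location location = module->SourceOffsetToLocation(source_offset);
+//   }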
+
+/**
+ * A compiled JavaScript module.
+ */
+class V8_EXPORT Module : public Data {
+ public:
+ /**
+ * The different states a module can be in.
+ *
+ * This corresponds to the states used in ECMAScript except that "evaluated"
+ * is split into kEvaluated and kErrored, indicating success and failure,
+ * respectively.
+ */
+ enum Status {
+ kUninstantiated,
+ kInstantiating,
+ kInstantiated,
+ kEvaluating,
+ kEvaluated,
+ kErrored
+ };
+
+ /**
+ * Returns the module's current status.
+ */
+ Status GetStatus() const;
+
+ /**
+ * For a module in kErrored status, this returns the corresponding exception.
+ */
+ Local<Value> GetException() const;
+
+ /**
+ * Returns the number of modules requested by this module.
+ */
+ V8_DEPRECATE_SOON("Use Module::GetModuleRequests() and FixedArray::Length().")
+ int GetModuleRequestsLength() const;
+
+ /**
+ * Returns the ith module specifier in this module.
+ * i must be < GetModuleRequestsLength() and >= 0.
+ */
+ V8_DEPRECATE_SOON(
+ "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
+ Local<String> GetModuleRequest(int i) const;
+
+ /**
+ * Returns the source location (line number and column number) of the ith
+ * module specifier's first occurrence in this module.
+ */
+ V8_DEPRECATE_SOON(
+ "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
+ "Module::SourceOffsetToLocation().")
+ Location GetModuleRequestLocation(int i) const;
+
+ /**
+ * Returns the ModuleRequests for this module.
+ */
+ Local<FixedArray> GetModuleRequests() const;
+
+ /**
+ * For the given source text offset in this module, returns the corresponding
+ * Location with line and column numbers.
+ */
+ Location SourceOffsetToLocation(int offset) const;
+
+ /**
+ * Returns the identity hash for this object.
+ */
+ int GetIdentityHash() const;
+
+ using ResolveCallback V8_DEPRECATE_SOON("Use ResolveModuleCallback") =
+ MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
+ Local<Module> referrer);
+ using ResolveModuleCallback = MaybeLocal<Module> (*)(
+ Local<Context> context, Local<String> specifier,
+ Local<FixedArray> import_assertions, Local<Module> referrer);
+
+ /**
+ * Instantiates the module and its dependencies.
+ *
+ * Returns an empty Maybe<bool> if an exception occurred during
+ * instantiation. (In the case where the callback throws an exception, that
+ * exception is propagated.)
+ */
+ V8_DEPRECATE_SOON(
+ "Use the version of InstantiateModule that takes a ResolveModuleCallback "
+ "parameter")
+ V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
+ ResolveCallback callback);
+ V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
+ Local<Context> context, ResolveModuleCallback callback);
+
+ /**
+ * Evaluates the module and its dependencies.
+ *
+ * If status is kInstantiated, run the module's code and return a Promise
+ * object. On success, set status to kEvaluated and resolve the Promise with
+ * the completion value; on failure, set status to kErrored and reject the
+ * Promise with the error.
+ *
+ * If IsGraphAsync() is false, the returned Promise is settled.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
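+
+ // A sketch of the instantiate/evaluate flow, assuming `context` is an
+ // entered Local<Context>, `module` is a compiled Local<Module>, and
+ // `ResolveModuleCallbackFn` is an embedder-defined function matching
+ // ResolveModuleCallback (all names are illustrative):
+ //
+ //   if (module->InstantiateModule(context, ResolveModuleCallbackFn)
+ //           .FromMaybe(false)) {
+ //     v8::Local<v8::Value> result;
+ //     if (module->Evaluate(context).ToLocal(&result)) {
+ //       // `result` is a Promise that settles with the completion value.
+ //     }
+ //   }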
+
+ /**
+ * Returns the namespace object of this module.
+ *
+ * The module's status must be at least kInstantiated.
+ */
+ Local<Value> GetModuleNamespace();
+
+ /**
+ * Returns the corresponding context-unbound module script.
+ *
+ * The module must be unevaluated, i.e. its status must not be kEvaluating,
+ * kEvaluated or kErrored.
+ */
+ Local<UnboundModuleScript> GetUnboundModuleScript();
+
+ /**
+ * Returns the underlying script's id.
+ *
+ * The module must be a SourceTextModule and must not have a kErrored status.
+ */
+ int ScriptId() const;
+
+ /**
+ * Returns whether this module or any of its requested modules is async,
+ * i.e. contains top-level await.
+ *
+ * The module's status must be at least kInstantiated.
+ */
+ bool IsGraphAsync() const;
+
+ /**
+ * Returns whether the module is a SourceTextModule.
+ */
+ bool IsSourceTextModule() const;
+
+ /**
+ * Returns whether the module is a SyntheticModule.
+ */
+ bool IsSyntheticModule() const;
+
+ /**
+ * Callback defined in the embedder. This is responsible for setting
+ * the module's exported values with calls to SetSyntheticModuleExport().
+ * The callback must return a resolved Promise to indicate success (where no
+ * exception was thrown) and return an empty MaybeLocal to indicate failure
+ * (where an exception was thrown).
+ */
+ using SyntheticModuleEvaluationSteps =
+ MaybeLocal<Value> (*)(Local<Context> context, Local<Module> module);
+
+ /**
+ * Creates a new SyntheticModule with the specified export names, where
+ * evaluation_steps will be executed upon module evaluation.
+ * export_names must not contain duplicates.
+ * module_name is used solely for logging/debugging and doesn't affect module
+ * behavior.
+ */
+ static Local<Module> CreateSyntheticModule(
+ Isolate* isolate, Local<String> module_name,
+ const std::vector<Local<String>>& export_names,
+ SyntheticModuleEvaluationSteps evaluation_steps);
+
+ /**
+ * Set this module's exported value for the name export_name to the specified
+ * export_value. This method must be called only on Modules created via
+ * CreateSyntheticModule. An error will be thrown if export_name is not one
+ * of the export_names that were passed in that CreateSyntheticModule call.
+ * Returns Just(true) on success, Nothing<bool>() if an error was thrown.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
+ Isolate* isolate, Local<String> export_name, Local<Value> export_value);
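+
+ // A sketch of a synthetic module, assuming `isolate` is the current Isolate
+ // and `context` is an entered Local<Context> (names are illustrative):
+ //
+ //   v8::MaybeLocal<v8::Value> EvalSteps(v8::Local<v8::Context> context,
+ //                                       v8::Local<v8::Module> module) {
+ //     v8::Isolate* isolate = context->GetIsolate();
+ //     module->SetSyntheticModuleExport(
+ //               isolate, v8::String::NewFromUtf8Literal(isolate, "answer"),
+ //               v8::Number::New(isolate, 42))
+ //         .Check();
+ //     v8::Local<v8::Promise::Resolver> resolver =
+ //         v8::Promise::Resolver::New(context).ToLocalChecked();
+ //     resolver->Resolve(context, v8::Undefined(isolate)).Check();
+ //     return resolver->GetPromise();  // Resolved Promise signals success.
+ //   }
+ //
+ //   v8::Local<v8::Module> module = v8::Module::CreateSyntheticModule(
+ //       isolate, v8::String::NewFromUtf8Literal(isolate, "virtual-module"),
+ //       {v8::String::NewFromUtf8Literal(isolate, "answer")}, EvalSteps);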
+
+ V8_INLINE static Module* Cast(Data* data);
+
+ private:
+ static void CheckCast(Data* obj);
+};
+
+/**
+ * A compiled JavaScript script, tied to a Context which was active when the
+ * script was compiled.
+ */
+class V8_EXPORT Script {
+ public:
+ /**
+ * A shorthand for ScriptCompiler::Compile().
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, Local<String> source,
+ ScriptOrigin* origin = nullptr);
+
+ /**
+ * Runs the script returning the resulting value. It will be run in the
+ * context in which it was created (ScriptCompiler::CompileBound or
+ * UnboundScript::BindToCurrentContext()).
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
+
+ /**
+ * Returns the corresponding context-unbound script.
+ */
+ Local<UnboundScript> GetUnboundScript();
+};
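+
+// A minimal compile-and-run sketch, assuming `isolate` is the current Isolate
+// and `context` is an entered Local<Context> (names are illustrative):
+//
+//   v8::Local<v8::String> source =
+//       v8::String::NewFromUtf8Literal(isolate, "6 * 7");
+//   v8::Local<v8::Script> script;
+//   v8::Local<v8::Value> result;
+//   if (v8::Script::Compile(context, source).ToLocal(&script) &&
+//       script->Run(context).ToLocal(&result)) {
+//     // `result` holds the completion value of the script (here, 42).
+//   }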
+
+enum class ScriptType { kClassic, kModule };
+
+/**
+ * For compiling scripts.
+ */
+class V8_EXPORT ScriptCompiler {
+ public:
+ class ConsumeCodeCacheTask;
+
+ /**
+ * Compilation data that the embedder can cache and pass back to speed up
+ * future compilations. The data is produced if the CompilerOptions passed to
+ * the compilation functions in ScriptCompiler contains produce_data_to_cache
+ * = true. The data to cache can then be retrieved from
+ * UnboundScript.
+ */
+ struct V8_EXPORT CachedData {
+ enum BufferPolicy { BufferNotOwned, BufferOwned };
+
+ CachedData()
+ : data(nullptr),
+ length(0),
+ rejected(false),
+ buffer_policy(BufferNotOwned) {}
+
+ // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
+ // data and guarantees that it stays alive until the CachedData object is
+ // destroyed. If the policy is BufferOwned, the given data will be deleted
+ // (with delete[]) when the CachedData object is destroyed.
+ CachedData(const uint8_t* data, int length,
+ BufferPolicy buffer_policy = BufferNotOwned);
+ ~CachedData();
+ // TODO(marja): Async compilation; add constructors which take a callback
+ // which will be called when V8 no longer needs the data.
+ const uint8_t* data;
+ int length;
+ bool rejected;
+ BufferPolicy buffer_policy;
+
+ // Prevent copying.
+ CachedData(const CachedData&) = delete;
+ CachedData& operator=(const CachedData&) = delete;
+ };
+
+ /**
+ * Source code which can then be compiled to an UnboundScript or Script.
+ */
+ class Source {
+ public:
+ // Source takes ownership of both CachedData and CodeCacheConsumeTask.
+ V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
+ CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ // Source takes ownership of both CachedData and CodeCacheConsumeTask.
+ V8_INLINE explicit Source(
+ Local<String> source_string, CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ V8_INLINE ~Source() = default;
+
+ // Ownership of the CachedData or its buffers is *not* transferred to the
+ // caller. The CachedData object is alive as long as the Source object is
+ // alive.
+ V8_INLINE const CachedData* GetCachedData() const;
+
+ V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
+
+ private:
+ friend class ScriptCompiler;
+
+ Local<String> source_string;
+
+ // Origin information
+ Local<Value> resource_name;
+ int resource_line_offset;
+ int resource_column_offset;
+ ScriptOriginOptions resource_options;
+ Local<Value> source_map_url;
+ Local<PrimitiveArray> host_defined_options;
+
+ // Cached data from a previous compilation (if a kConsume*Cache flag is
+ // set), or newly generated cache data (kProduce*Cache flags), filled in
+ // when calling a compile method.
+ std::unique_ptr<CachedData> cached_data;
+ std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
+ };
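+
+ // A sketch of consuming a previously produced code cache, assuming
+ // `context`, a `source_string` Local<String>, and the embedder-provided
+ // buffer `cache_bytes`/`cache_length` (names are illustrative):
+ //
+ //   auto* cached = new v8::ScriptCompiler::CachedData(
+ //       cache_bytes, cache_length);  // BufferNotOwned: embedder keeps the
+ //                                    // buffer alive; Source owns `cached`.
+ //   v8::ScriptCompiler::Source source(source_string, cached);
+ //   v8::Local<v8::Script> script;
+ //   if (v8::ScriptCompiler::Compile(context, &source,
+ //                                   v8::ScriptCompiler::kConsumeCodeCache)
+ //           .ToLocal(&script)) {
+ //     bool cache_rejected = source.GetCachedData()->rejected;
+ //     // If `cache_rejected` is true, the cache did not match this script or
+ //     // V8 version and compilation fell back to parsing the source.
+ //   }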
+
+ /**
+ * For streaming incomplete script data to V8. The embedder should implement a
+ * subclass of this class.
+ */
+ class V8_EXPORT ExternalSourceStream {
+ public:
+ virtual ~ExternalSourceStream() = default;
+
+ /**
+ * V8 calls this to request the next chunk of data from the embedder. This
+ * function will be called on a background thread, so it's OK to block and
+ * wait for the data, if the embedder doesn't have data yet. Returns the
+ * length of the data returned. When the data ends, GetMoreData should
+ * return 0. Caller takes ownership of the data.
+ *
+ * When streaming UTF-8 data, V8 handles multi-byte characters split between
+ * two data chunks, but doesn't handle multi-byte characters split between
+ * more than two data chunks. The embedder can avoid this problem by always
+ * returning at least 2 bytes of data.
+ *
+ * When streaming UTF-16 data, V8 does not handle characters split between
+ * two data chunks. The embedder has to make sure that chunks have an even
+ * length.
+ *
+ * If the embedder wants to cancel the streaming, they should make the next
+ * GetMoreData call return 0. V8 will interpret it as end of data (and most
+ * probably, parsing will fail). The streaming task will return as soon as
+ * V8 has parsed the data it received so far.
+ */
+ virtual size_t GetMoreData(const uint8_t** src) = 0;
+
+ /**
+ * V8 calls this method to set a 'bookmark' at the current position in
+ * the source stream, for the purpose of (maybe) later calling
+ * ResetToBookmark. If ResetToBookmark is called later, then subsequent
+ * calls to GetMoreData should return the same data as they did when
+ * SetBookmark was called earlier.
+ *
+ * The embedder may return 'false' to indicate it cannot provide this
+ * functionality.
+ */
+ virtual bool SetBookmark();
+
+ /**
+ * V8 calls this to return to a previously set bookmark.
+ */
+ virtual void ResetToBookmark();
+ };
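+
+ // A minimal stream sketch that hands V8 the whole source in one chunk
+ // (illustrative only; a real embedder would typically stream from the
+ // network or disk):
+ //
+ //   class OneShotStream : public v8::ScriptCompiler::ExternalSourceStream {
+ //    public:
+ //     explicit OneShotStream(std::string source) : source_(std::move(source)) {}
+ //     size_t GetMoreData(const uint8_t** src) override {
+ //       if (done_) return 0;  // Signal end of data.
+ //       done_ = true;
+ //       auto* copy = new uint8_t[source_.size()];
+ //       memcpy(copy, source_.data(), source_.size());
+ //       *src = copy;  // The caller takes ownership of this buffer.
+ //       return source_.size();
+ //     }
+ //
+ //    private:
+ //     std::string source_;
+ //     bool done_ = false;
+ //   };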
+
+ /**
+ * Source code which can be streamed into V8 in pieces. It will be parsed
+ * while streaming and compiled after parsing has completed. StreamedSource
+ * must be kept alive while the streaming task is run (see ScriptStreamingTask
+ * below).
+ */
+ class V8_EXPORT StreamedSource {
+ public:
+ enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
+
+ StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
+ Encoding encoding);
+ ~StreamedSource();
+
+ internal::ScriptStreamingData* impl() const { return impl_.get(); }
+
+ // Prevent copying.
+ StreamedSource(const StreamedSource&) = delete;
+ StreamedSource& operator=(const StreamedSource&) = delete;
+
+ private:
+ std::unique_ptr<internal::ScriptStreamingData> impl_;
+ };
+
+ /**
+ * A streaming task which the embedder must run on a background thread to
+ * stream scripts into V8. Returned by ScriptCompiler::StartStreaming.
+ */
+ class V8_EXPORT ScriptStreamingTask final {
+ public:
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ScriptStreamingTask(internal::ScriptStreamingData* data)
+ : data_(data) {}
+
+ internal::ScriptStreamingData* data_;
+ };
+
+ /**
+ * A task which the embedder must run on a background thread to
+ * consume a V8 code cache. Returned by
+ * ScriptCompiler::StartConsumingCodeCache.
+ */
+ class V8_EXPORT ConsumeCodeCacheTask final {
+ public:
+ ~ConsumeCodeCacheTask();
+
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ConsumeCodeCacheTask(
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl);
+
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl_;
+ };
+
+ enum CompileOptions {
+ kNoCompileOptions = 0,
+ kConsumeCodeCache,
+ kEagerCompile
+ };
+
+ /**
+ * The reason for which we are not requesting or providing a code cache.
+ */
+ enum NoCacheReason {
+ kNoCacheNoReason = 0,
+ kNoCacheBecauseCachingDisabled,
+ kNoCacheBecauseNoResource,
+ kNoCacheBecauseInlineScript,
+ kNoCacheBecauseModule,
+ kNoCacheBecauseStreamingSource,
+ kNoCacheBecauseInspector,
+ kNoCacheBecauseScriptTooSmall,
+ kNoCacheBecauseCacheTooCold,
+ kNoCacheBecauseV8Extension,
+ kNoCacheBecauseExtensionModule,
+ kNoCacheBecausePacScript,
+ kNoCacheBecauseInDocumentWrite,
+ kNoCacheBecauseResourceWithNoCacheHandler,
+ kNoCacheBecauseDeferredProduceCodeCache
+ };
+
+ /**
+ * Compiles the specified script (context-independent).
+ * Cached data as part of the source object can be optionally produced to be
+ * consumed later to speed up compilation of identical source scripts.
+ *
+ * Note that when producing cached data, the source's cached data must be
+ * NULL. When consuming cached data, the cached data must have been
+ * produced by the same version of V8, and the embedder needs to ensure the
+ * cached data is the correct one for the given script.
+ *
+ * \param source Script source code.
+ * \return Compiled script object (context independent; for running it must be
+ * bound to a context).
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Compiles the specified script (bound to current context).
+ *
+ * \param source Script source code.
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when this function returns.
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Returns a task which streams script data into V8, or NULL if the script
+ * cannot be streamed. The user is responsible for running the task on a
+ * background thread and deleting it. When run, the task starts parsing the
+ * script, and it will request data from the StreamedSource as needed. When
+ * ScriptStreamingTask::Run exits, all data has been streamed and the script
+ * can be compiled (see Compile below).
+ *
+ * This API allows the embedder to start streaming with as little data as possible, and
+ * the remaining data (for example, the ScriptOrigin) is passed to Compile.
+ */
+ static ScriptStreamingTask* StartStreaming(
+ Isolate* isolate, StreamedSource* source,
+ ScriptType type = ScriptType::kClassic);
+
+ static ConsumeCodeCacheTask* StartConsumingCodeCache(
+ Isolate* isolate, std::unique_ptr<CachedData> source);
+
+ /**
+ * Compiles a streamed script (bound to current context).
+ *
+ * This can only be called after the streaming has finished
+ * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+ * during streaming, so the embedder needs to pass the full source here.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, StreamedSource* source,
+ Local<String> full_source_string, const ScriptOrigin& origin);
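+
+ // A sketch of the streaming pipeline, assuming `isolate`, `context`, an
+ // `origin` ScriptOrigin, and the OneShotStream class sketched above (names
+ // are illustrative). StartStreaming may return NULL if the script cannot be
+ // streamed:
+ //
+ //   v8::ScriptCompiler::StreamedSource streamed(
+ //       std::make_unique<OneShotStream>("1 + 1"),
+ //       v8::ScriptCompiler::StreamedSource::UTF8);
+ //   std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task(
+ //       v8::ScriptCompiler::StartStreaming(isolate, &streamed));
+ //   if (task) task->Run();  // Normally posted to a background thread.
+ //   v8::Local<v8::Script> script;
+ //   if (v8::ScriptCompiler::Compile(
+ //           context, &streamed,
+ //           v8::String::NewFromUtf8Literal(isolate, "1 + 1"), origin)
+ //           .ToLocal(&script)) {
+ //     // `script` can now be run in `context`.
+ //   }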
+
+ /**
+ * Return a version tag for CachedData for the current V8 version & flags.
+ *
+ * This value is meant only for determining whether a previously generated
+ * CachedData instance is still valid; the tag has no other meaning.
+ *
+ * Background: The data carried by CachedData may depend on the exact
+ * V8 version number or current compiler flags. This means that when
+ * persisting CachedData, the embedder must take care to not pass in
+ * data from another V8 version, or the same version with different
+ * features enabled.
+ *
+ * The easiest way to do so is to clear the embedder's cache on any
+ * such change.
+ *
+ * Alternatively, this tag can be stored alongside the cached data and
+ * compared when it is being used.
+ */
+ static uint32_t CachedDataVersionTag();
+
+ /**
+ * Compile an ES module, returning a Module that encapsulates
+ * the compiled code.
+ *
+ * Corresponds to the ParseModule abstract operation in the
+ * ECMAScript specification.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Compiles a streamed module script.
+ *
+ * This can only be called after the streaming has finished
+ * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+ * during streaming, so the embedder needs to pass the full source here.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
+ Local<Context> context, StreamedSource* v8_source,
+ Local<String> full_source_string, const ScriptOrigin& origin);
+
+ /**
+ * Compile a function for a given context. This is equivalent to running
+ *
+ * with (obj) {
+ * return function(args) { ... }
+ * }
+ *
+ * It is possible to specify multiple context extensions (obj in the above
+ * example).
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
+ Local<Context> context, Source* source, size_t arguments_count,
+ Local<String> arguments[], size_t context_extension_count,
+ Local<Object> context_extensions[],
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason,
+ Local<ScriptOrModule>* script_or_module_out = nullptr);
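+
+ // A sketch of CompileFunctionInContext, assuming `isolate`, `context`, and a
+ // `scope_object` Local<Object> providing the free variable `y` (names are
+ // illustrative):
+ //
+ //   v8::ScriptCompiler::Source source(
+ //       v8::String::NewFromUtf8Literal(isolate, "return x + y;"));
+ //   v8::Local<v8::String> arg_names[] = {
+ //       v8::String::NewFromUtf8Literal(isolate, "x")};
+ //   v8::Local<v8::Object> extensions[] = {scope_object};
+ //   v8::Local<v8::Function> fn;
+ //   if (v8::ScriptCompiler::CompileFunctionInContext(
+ //           context, &source, 1, arg_names, 1, extensions)
+ //           .ToLocal(&fn)) {
+ //     // `fn` behaves like: with (scope_object) { function(x) { return x + y; } }
+ //   }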
+
+ /**
+ * Creates and returns code cache for the specified unbound_script.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCache(Local<UnboundScript> unbound_script);
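+
+ // A sketch of producing a cache blob, assuming `isolate` and a
+ // `source_string` Local<String> (names are illustrative):
+ //
+ //   v8::ScriptCompiler::Source source(source_string);
+ //   v8::Local<v8::UnboundScript> unbound =
+ //       v8::ScriptCompiler::CompileUnboundScript(
+ //           isolate, &source, v8::ScriptCompiler::kEagerCompile)
+ //           .ToLocalChecked();
+ //   std::unique_ptr<v8::ScriptCompiler::CachedData> cache(
+ //       v8::ScriptCompiler::CreateCodeCache(unbound));
+ //   // `cache->data` / `cache->length` can be persisted and consumed later
+ //   // with kConsumeCodeCache (see CachedData and Source above).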
+
+ /**
+ * Creates and returns code cache for the specified unbound_module_script.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCache(
+ Local<UnboundModuleScript> unbound_module_script);
+
+ /**
+ * Creates and returns code cache for the specified function that was
+ * previously produced by CompileFunctionInContext.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCacheForFunction(Local<Function> function);
+
+ private:
+ static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
+ Isolate* isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason);
+};
+
+ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
+ CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
+ : source_string(string),
+ resource_name(origin.ResourceName()),
+ resource_line_offset(origin.LineOffset()),
+ resource_column_offset(origin.ColumnOffset()),
+ resource_options(origin.Options()),
+ source_map_url(origin.SourceMapUrl()),
+ host_defined_options(origin.HostDefinedOptions()),
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
+
+ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
+ : source_string(string),
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
+
+const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
+ const {
+ return cached_data.get();
+}
+
+const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
+ return resource_options;
+}
+
+ModuleRequest* ModuleRequest::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<ModuleRequest*>(data);
+}
+
+Module* Module::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Module*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_SCRIPT_H_
diff --git a/chromium/v8/include/v8-snapshot.h b/chromium/v8/include/v8-snapshot.h
new file mode 100644
index 00000000000..ed02598c36b
--- /dev/null
+++ b/chromium/v8/include/v8-snapshot.h
@@ -0,0 +1,198 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_SNAPSHOT_H_
+#define INCLUDE_V8_SNAPSHOT_H_
+
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Object;
+
+class V8_EXPORT StartupData {
+ public:
+ /**
+ * Whether the data created can be rehashed and the hash seed can be
+ * recomputed when deserialized.
+ * Only valid for StartupData returned by SnapshotCreator::CreateBlob().
+ */
+ bool CanBeRehashed() const;
+ /**
+ * Allows embedders to verify whether the data is valid for the current
+ * V8 instance.
+ */
+ bool IsValid() const;
+
+ const char* data;
+ int raw_size;
+};
+
+/**
+ * Callback and supporting data used in SnapshotCreator to implement embedder
+ * logic to serialize internal fields.
+ * Internal fields that directly reference V8 objects are serialized without
+ * calling this callback. Internal fields that contain aligned pointers are
+ * serialized by this callback if it returns non-zero result. Otherwise it is
+ * serialized verbatim.
+ */
+struct SerializeInternalFieldsCallback {
+ using CallbackFunction = StartupData (*)(Local<Object> holder, int index,
+ void* data);
+ SerializeInternalFieldsCallback(CallbackFunction function = nullptr,
+ void* data_arg = nullptr)
+ : callback(function), data(data_arg) {}
+ CallbackFunction callback;
+ void* data;
+};
+// Note that these fields are called "internal fields" in the API and called
+// "embedder fields" within V8.
+using SerializeEmbedderFieldsCallback = SerializeInternalFieldsCallback;
+
+/**
+ * Callback and supporting data used to implement embedder logic to deserialize
+ * internal fields.
+ */
+struct DeserializeInternalFieldsCallback {
+ using CallbackFunction = void (*)(Local<Object> holder, int index,
+ StartupData payload, void* data);
+ DeserializeInternalFieldsCallback(CallbackFunction function = nullptr,
+ void* data_arg = nullptr)
+ : callback(function), data(data_arg) {}
+ void (*callback)(Local<Object> holder, int index, StartupData payload,
+ void* data);
+ void* data;
+};
+
+using DeserializeEmbedderFieldsCallback = DeserializeInternalFieldsCallback;
+
+/**
+ * Helper class to create a snapshot data blob.
+ *
+ * The Isolate used by a SnapshotCreator is owned by it, and will be entered
+ * and exited by the constructor and destructor, respectively; The destructor
+ * will also destroy the Isolate. Experimental language features, including
+ * those available by default, are not available while creating a snapshot.
+ */
+class V8_EXPORT SnapshotCreator {
+ public:
+ enum class FunctionCodeHandling { kClear, kKeep };
+
+ /**
+ * Initialize and enter an isolate, and set it up for serialization.
+ * The isolate is either created from scratch or from an existing snapshot.
+ * The caller keeps ownership of the argument snapshot.
+ * \param existing_blob existing snapshot from which to create this one.
+ * \param external_references a null-terminated array of external references
+ * that must be equivalent to CreateParams::external_references.
+ */
+ SnapshotCreator(Isolate* isolate,
+ const intptr_t* external_references = nullptr,
+ StartupData* existing_blob = nullptr);
+
+ /**
+ * Create and enter an isolate, and set it up for serialization.
+ * The isolate is either created from scratch or from an existing snapshot.
+ * The caller keeps ownership of the argument snapshot.
+ * \param existing_blob existing snapshot from which to create this one.
+ * \param external_references a null-terminated array of external references
+ * that must be equivalent to CreateParams::external_references.
+ */
+ SnapshotCreator(const intptr_t* external_references = nullptr,
+ StartupData* existing_blob = nullptr);
+
+ /**
+ * Destroy the snapshot creator, and exit and dispose of the Isolate
+ * associated with it.
+ */
+ ~SnapshotCreator();
+
+ /**
+ * \returns the isolate prepared by the snapshot creator.
+ */
+ Isolate* GetIsolate();
+
+ /**
+ * Set the default context to be included in the snapshot blob.
+ * The snapshot will not contain the global proxy. We expect either a global
+ * proxy, or a global object template from which to create one, to be
+ * provided upon deserialization.
+ *
+ * \param callback optional callback to serialize internal fields.
+ */
+ void SetDefaultContext(Local<Context> context,
+ SerializeInternalFieldsCallback callback =
+ SerializeInternalFieldsCallback());
+
+ /**
+ * Add additional context to be included in the snapshot blob.
+ * The snapshot will include the global proxy.
+ *
+ * \param callback optional callback to serialize internal fields.
+ *
+ * \returns the index of the context in the snapshot blob.
+ */
+ size_t AddContext(Local<Context> context,
+ SerializeInternalFieldsCallback callback =
+ SerializeInternalFieldsCallback());
+
+ /**
+ * Attach arbitrary V8::Data to the context snapshot, which can be retrieved
+ * via Context::GetDataFromSnapshotOnce after deserialization. This data does
+ * not survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<Context> context, Local<T> object);
+
+ /**
+ * Attach arbitrary V8::Data to the isolate snapshot, which can be retrieved
+ * via Isolate::GetDataFromSnapshotOnce after deserialization. This data does
+ * not survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<T> object);
+
+ /**
+ * Creates a snapshot data blob.
+ * This must not be called from within a handle scope.
+ * \param function_code_handling whether to include compiled function code
+ * in the snapshot.
+ * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
+ * caller acquires ownership of the data array in the return value.
+ */
+ StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+
+ // Disallow copying and assigning.
+ SnapshotCreator(const SnapshotCreator&) = delete;
+ void operator=(const SnapshotCreator&) = delete;
+
+ private:
+ size_t AddData(Local<Context> context, internal::Address object);
+ size_t AddData(internal::Address object);
+
+ void* data_;
+};
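+
+// A minimal snapshot sketch, assuming V8 has already been initialized:
+//
+//   v8::SnapshotCreator creator;
+//   v8::Isolate* isolate = creator.GetIsolate();
+//   {
+//     v8::HandleScope handle_scope(isolate);
+//     v8::Local<v8::Context> context = v8::Context::New(isolate);
+//     // Run any warm-up / setup code in `context` here, then:
+//     creator.SetDefaultContext(context);
+//   }
+//   // CreateBlob must not be called from within a handle scope.
+//   v8::StartupData blob =
+//       creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+//   // The caller owns blob.data and must eventually delete[] it.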
+
+template <class T>
+size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
+ T* object_ptr = *object;
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
+ return AddData(context, *p);
+}
+
+template <class T>
+size_t SnapshotCreator::AddData(Local<T> object) {
+ T* object_ptr = *object;
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
+ return AddData(*p);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_SNAPSHOT_H_
diff --git a/chromium/v8/include/v8-statistics.h b/chromium/v8/include/v8-statistics.h
new file mode 100644
index 00000000000..7f69e5d65ef
--- /dev/null
+++ b/chromium/v8/include/v8-statistics.h
@@ -0,0 +1,215 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_STATISTICS_H_
+#define INCLUDE_V8_STATISTICS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-promise.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+
+namespace internal {
+class ReadOnlyHeap;
+} // namespace internal
+
+/**
+ * Controls how the default MeasureMemoryDelegate reports the result of
+ * the memory measurement to JS. With kSummary only the total size is reported.
+ * With kDetailed the result includes the size of each native context.
+ */
+enum class MeasureMemoryMode { kSummary, kDetailed };
+
+/**
+ * Controls how promptly a memory measurement request is executed.
+ * By default the measurement is folded with the next scheduled GC which may
+ * happen after a while and is forced after some timeout.
+ * The kEager mode starts incremental GC right away and is useful for testing.
+ * The kLazy mode does not force GC.
+ */
+enum class MeasureMemoryExecution { kDefault, kEager, kLazy };
+
+/**
+ * The delegate is used in Isolate::MeasureMemory API.
+ *
+ * It specifies the contexts that need to be measured and gets called when
+ * the measurement is completed to report the results.
+ */
+class V8_EXPORT MeasureMemoryDelegate {
+ public:
+ virtual ~MeasureMemoryDelegate() = default;
+
+ /**
+ * Returns true if the size of the given context needs to be measured.
+ */
+ virtual bool ShouldMeasure(Local<Context> context) = 0;
+
+ /**
+ * This function is called when memory measurement finishes.
+ *
+ * \param context_sizes_in_bytes a vector of (context, size) pairs that
+ * includes each context for which ShouldMeasure returned true and that
+ * was not garbage collected while the memory measurement was in progress.
+ *
+ * \param unattributed_size_in_bytes total size of objects that were not
+ * attributed to any context (i.e. are likely shared objects).
+ */
+ virtual void MeasurementComplete(
+ const std::vector<std::pair<Local<Context>, size_t>>&
+ context_sizes_in_bytes,
+ size_t unattributed_size_in_bytes) = 0;
+
+ /**
+ * Returns a default delegate that resolves the given promise when
+ * the memory measurement completes.
+ *
+ * \param isolate the current isolate
+ * \param context the current context
+ * \param promise_resolver the promise resolver that is given the
+ * result of the memory measurement.
+ * \param mode the detail level of the result.
+ */
+ static std::unique_ptr<MeasureMemoryDelegate> Default(
+ Isolate* isolate, Local<Context> context,
+ Local<Promise::Resolver> promise_resolver, MeasureMemoryMode mode);
+};
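+
+// A sketch of a custom delegate for the Isolate::MeasureMemory API mentioned
+// above, assuming `isolate` is the current Isolate (names are illustrative):
+//
+//   class LoggingDelegate final : public v8::MeasureMemoryDelegate {
+//    public:
+//     bool ShouldMeasure(v8::Local<v8::Context>) override { return true; }
+//     void MeasurementComplete(
+//         const std::vector<std::pair<v8::Local<v8::Context>, size_t>>&
+//             context_sizes_in_bytes,
+//         size_t unattributed_size_in_bytes) override {
+//       for (const auto& entry : context_sizes_in_bytes) {
+//         // entry.second is the measured size of entry.first, in bytes.
+//       }
+//     }
+//   };
+//
+//   isolate->MeasureMemory(std::make_unique<LoggingDelegate>(),
+//                          v8::MeasureMemoryExecution::kEager);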
+
+/**
+ * Collection of shared per-process V8 memory information.
+ *
+ * Instances of this class can be passed to
+ * v8::V8::GetSharedMemoryStatistics to get shared memory statistics from V8.
+ */
+class V8_EXPORT SharedMemoryStatistics {
+ public:
+ SharedMemoryStatistics();
+ size_t read_only_space_size() { return read_only_space_size_; }
+ size_t read_only_space_used_size() { return read_only_space_used_size_; }
+ size_t read_only_space_physical_size() {
+ return read_only_space_physical_size_;
+ }
+
+ private:
+ size_t read_only_space_size_;
+ size_t read_only_space_used_size_;
+ size_t read_only_space_physical_size_;
+
+ friend class V8;
+ friend class internal::ReadOnlyHeap;
+};
+
+/**
+ * Collection of V8 heap information.
+ *
+ * Instances of this class can be passed to v8::Isolate::GetHeapStatistics to
+ * get heap statistics from V8.
+ */
+class V8_EXPORT HeapStatistics {
+ public:
+ HeapStatistics();
+ size_t total_heap_size() { return total_heap_size_; }
+ size_t total_heap_size_executable() { return total_heap_size_executable_; }
+ size_t total_physical_size() { return total_physical_size_; }
+ size_t total_available_size() { return total_available_size_; }
+ size_t total_global_handles_size() { return total_global_handles_size_; }
+ size_t used_global_handles_size() { return used_global_handles_size_; }
+ size_t used_heap_size() { return used_heap_size_; }
+ size_t heap_size_limit() { return heap_size_limit_; }
+ size_t malloced_memory() { return malloced_memory_; }
+ size_t external_memory() { return external_memory_; }
+ size_t peak_malloced_memory() { return peak_malloced_memory_; }
+ size_t number_of_native_contexts() { return number_of_native_contexts_; }
+ size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
+
+ /**
+ * Returns a 0/1 boolean, which signifies whether V8 overwrites heap
+ * garbage with a bit pattern.
+ */
+ size_t does_zap_garbage() { return does_zap_garbage_; }
+
+ private:
+ size_t total_heap_size_;
+ size_t total_heap_size_executable_;
+ size_t total_physical_size_;
+ size_t total_available_size_;
+ size_t used_heap_size_;
+ size_t heap_size_limit_;
+ size_t malloced_memory_;
+ size_t external_memory_;
+ size_t peak_malloced_memory_;
+ bool does_zap_garbage_;
+ size_t number_of_native_contexts_;
+ size_t number_of_detached_contexts_;
+ size_t total_global_handles_size_;
+ size_t used_global_handles_size_;
+
+ friend class V8;
+ friend class Isolate;
+};
+
+class V8_EXPORT HeapSpaceStatistics {
+ public:
+ HeapSpaceStatistics();
+ const char* space_name() { return space_name_; }
+ size_t space_size() { return space_size_; }
+ size_t space_used_size() { return space_used_size_; }
+ size_t space_available_size() { return space_available_size_; }
+ size_t physical_space_size() { return physical_space_size_; }
+
+ private:
+ const char* space_name_;
+ size_t space_size_;
+ size_t space_used_size_;
+ size_t space_available_size_;
+ size_t physical_space_size_;
+
+ friend class Isolate;
+};
+
+class V8_EXPORT HeapObjectStatistics {
+ public:
+ HeapObjectStatistics();
+ const char* object_type() { return object_type_; }
+ const char* object_sub_type() { return object_sub_type_; }
+ size_t object_count() { return object_count_; }
+ size_t object_size() { return object_size_; }
+
+ private:
+ const char* object_type_;
+ const char* object_sub_type_;
+ size_t object_count_;
+ size_t object_size_;
+
+ friend class Isolate;
+};
+
+class V8_EXPORT HeapCodeStatistics {
+ public:
+ HeapCodeStatistics();
+ size_t code_and_metadata_size() { return code_and_metadata_size_; }
+ size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; }
+ size_t external_script_source_size() { return external_script_source_size_; }
+
+ private:
+ size_t code_and_metadata_size_;
+ size_t bytecode_and_metadata_size_;
+ size_t external_script_source_size_;
+
+ friend class Isolate;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_STATISTICS_H_
diff --git a/chromium/v8/include/v8-template.h b/chromium/v8/include/v8-template.h
new file mode 100644
index 00000000000..96fcab6074c
--- /dev/null
+++ b/chromium/v8/include/v8-template.h
@@ -0,0 +1,1052 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TEMPLATE_H_
+#define INCLUDE_V8_TEMPLATE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class AccessorSignature;
+class CFunction;
+class FunctionTemplate;
+class ObjectTemplate;
+class Signature;
+
+// --- Templates ---
+
+#define V8_INTRINSICS_LIST(F) \
+ F(ArrayProto_entries, array_entries_iterator) \
+ F(ArrayProto_forEach, array_for_each_iterator) \
+ F(ArrayProto_keys, array_keys_iterator) \
+ F(ArrayProto_values, array_values_iterator) \
+ F(ArrayPrototype, initial_array_prototype) \
+ F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
+ F(ErrorPrototype, initial_error_prototype) \
+ F(IteratorPrototype, initial_iterator_prototype) \
+ F(ObjProto_valueOf, object_value_of_function)
+
+enum Intrinsic {
+#define V8_DECL_INTRINSIC(name, iname) k##name,
+ V8_INTRINSICS_LIST(V8_DECL_INTRINSIC)
+#undef V8_DECL_INTRINSIC
+};
+
+/**
+ * The superclass of object and function templates.
+ */
+class V8_EXPORT Template : public Data {
+ public:
+ /**
+ * Adds a property to each instance created by this template.
+ *
+ * The property must be defined either as a primitive value, or a template.
+ */
+ void Set(Local<Name> name, Local<Data> value,
+ PropertyAttribute attributes = None);
+ void SetPrivate(Local<Private> name, Local<Data> value,
+ PropertyAttribute attributes = None);
+ V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value,
+ PropertyAttribute attributes = None);
+
+ void SetAccessorProperty(
+ Local<Name> name,
+ Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
+ Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
+ PropertyAttribute attribute = None, AccessControl settings = DEFAULT);
+
+ /**
+ * Whenever the property with the given name is accessed on objects
+ * created from this Template the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+ * field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetNativeDataProperty(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ void SetNativeDataProperty(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
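+
+ // A sketch of installing a native data property, assuming `isolate` and an
+ // `object_template` Local<ObjectTemplate> (names are illustrative):
+ //
+ //   void XGetter(v8::Local<v8::Name> name,
+ //                const v8::PropertyCallbackInfo<v8::Value>& info) {
+ //     info.GetReturnValue().Set(42);
+ //   }
+ //
+ //   object_template->SetNativeDataProperty(
+ //       v8::String::NewFromUtf8Literal(isolate, "x"), XGetter);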
+
+ /**
+ * Like SetNativeDataProperty, but V8 will replace the native data property
+ * with a real data property on first access.
+ */
+ void SetLazyDataProperty(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * During template instantiation, sets the value with the intrinsic property
+ * from the correct context.
+ */
+ void SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
+ PropertyAttribute attribute = None);
+
+ private:
+ Template();
+
+ friend class ObjectTemplate;
+ friend class FunctionTemplate;
+};
+
+// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just
+// NamedPropertyFooCallback.
+
+/**
+ * Interceptor for get requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted get request.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \code
+ * void GetterCallback(
+ * Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * info.GetReturnValue().Set(v8_num(42));
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ * v8::Local<v8::Value> result = CompileRun("obj.a = 17; obj.a");
+ * CHECK(v8_num(42)->Equals(env.local(), result).FromJust());
+ * \endcode
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyGetterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Interceptor for set requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the setter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the setter
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param value The value which the property will have if the request
+ * is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertySetterCallback =
+ void (*)(Local<Name> property, Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Intercepts all requests that query the attributes of the
+ * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and
+ * defineProperty().
+ *
+ * Use `info.GetReturnValue().Set(value)` to set the property attributes. The
+ * value is an integer encoding a `v8::PropertyAttribute`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note Some functions query the property attributes internally, even though
+ * they do not return the attributes. For example, `hasOwnProperty()` can
+ * trigger this interceptor depending on the state of the object.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertyQueryCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Integer>& info);
+
+/**
+ * Interceptor for delete requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the deleter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
+ * used as the return value of `delete`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If you need to mimic the behavior of `delete`, i.e., throw in strict
+ * mode instead of returning false, use `info.ShouldThrowOnError()` to determine
+ * if you are in strict mode.
+ *
+ * See also `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertyDeleterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
+
+/**
+ * Returns an array containing the names of the properties the named
+ * property getter intercepts.
+ *
+ * Note: The values in the array must be of type v8::Name.
+ */
+using GenericNamedPropertyEnumeratorCallback =
+ void (*)(const PropertyCallbackInfo<Array>& info);
+
+/**
+ * Interceptor for defineProperty requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the definer successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the definer
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param desc The property descriptor which is used to define the
+ * property if the request is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyDefinerCallback =
+ void (*)(Local<Name> property, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Interceptor for getOwnPropertyDescriptor requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted request. The return value must be an object that
+ * can be converted to a PropertyDescriptor, e.g., a `v8::value` returned from
+ * `v8::Object::getOwnPropertyDescriptor`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If GetOwnPropertyDescriptor is intercepted, it will
+ * always return true, i.e., indicate that the property was found.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyDescriptorCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyGetterCallback`.
+ */
+using IndexedPropertyGetterCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertySetterCallback`.
+ */
+using IndexedPropertySetterCallback =
+ void (*)(uint32_t index, Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyQueryCallback`.
+ */
+using IndexedPropertyQueryCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDeleterCallback`.
+ */
+using IndexedPropertyDeleterCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
+
+/**
+ * Returns an array containing the indices of the properties the indexed
+ * property getter intercepts.
+ *
+ * Note: The values in the array must be uint32_t.
+ */
+using IndexedPropertyEnumeratorCallback =
+ void (*)(const PropertyCallbackInfo<Array>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDefinerCallback`.
+ */
+using IndexedPropertyDefinerCallback =
+ void (*)(uint32_t index, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDescriptorCallback`.
+ */
+using IndexedPropertyDescriptorCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Returns true if the given context should be allowed to access the given
+ * object.
+ */
+using AccessCheckCallback = bool (*)(Local<Context> accessing_context,
+ Local<Object> accessed_object,
+ Local<Value> data);
+
+enum class ConstructorBehavior { kThrow, kAllow };
+
+/**
+ * A FunctionTemplate is used to create functions at runtime. There
+ * can only be one function created from a FunctionTemplate in a
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So in case the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
+ *
+ * Any modification of a FunctionTemplate after first instantiation will trigger
+ * a crash.
+ *
+ * A FunctionTemplate can have properties, these properties are added to the
+ * function object when it is created.
+ *
+ * A FunctionTemplate has a corresponding instance template which is
+ * used to create object instances when the function is used as a
+ * constructor. Properties added to the instance template are added to
+ * each object instance.
+ *
+ * A FunctionTemplate can have a prototype template. The prototype template
+ * is used to create the prototype object of the function.
+ *
+ * The following example shows how to use a FunctionTemplate:
+ *
+ * \code
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ * t->Set(isolate, "func_property", v8::Number::New(isolate, 1));
+ *
+ * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
+ * proto_t->Set(isolate,
+ * "proto_method",
+ * v8::FunctionTemplate::New(isolate, InvokeCallback));
+ * proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
+ *
+ * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
+ * instance_t->SetAccessor(
+ * String::NewFromUtf8Literal(isolate, "instance_accessor"),
+ * InstanceAccessorCallback);
+ * instance_t->SetHandler(
+ * NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
+ * instance_t->Set(String::NewFromUtf8Literal(isolate, "instance_property"),
+ * Number::New(isolate, 3));
+ *
+ * v8::Local<v8::Function> function = t->GetFunction();
+ * v8::Local<v8::Object> instance = function->NewInstance();
+ * \endcode
+ *
+ * Let's use "function" as the JS variable name of the function object
+ * and "instance" for the instance object created above. The function
+ * and the instance will have the following properties:
+ *
+ * \code
+ * func_property in function == true;
+ * function.func_property == 1;
+ *
+ * function.prototype.proto_method() invokes 'InvokeCallback'
+ * function.prototype.proto_const == 2;
+ *
+ * instance instanceof function == true;
+ * instance.instance_accessor calls 'InstanceAccessorCallback'
+ * instance.instance_property == 3;
+ * \endcode
+ *
+ * A FunctionTemplate can inherit from another one by calling the
+ * FunctionTemplate::Inherit method. The following graph illustrates
+ * the semantics of inheritance:
+ *
+ * \code
+ * FunctionTemplate Parent -> Parent() . prototype -> { }
+ * ^ ^
+ * | Inherit(Parent) | .__proto__
+ * | |
+ * FunctionTemplate Child -> Child() . prototype -> { }
+ * \endcode
+ *
+ * A FunctionTemplate 'Child' inherits from 'Parent', the prototype
+ * object of the Child() function has __proto__ pointing to the
+ * Parent() function's prototype object. An instance of the Child
+ * function has all properties on Parent's instance templates.
+ *
+ * Let Parent be the FunctionTemplate initialized in the previous
+ * section and create a Child FunctionTemplate by:
+ *
+ * \code
+ * Local<FunctionTemplate> parent = t;
+ * Local<FunctionTemplate> child = FunctionTemplate::New();
+ * child->Inherit(parent);
+ *
+ * Local<Function> child_function = child->GetFunction();
+ * Local<Object> child_instance = child_function->NewInstance();
+ * \endcode
+ *
+ * The Child function and Child instance will have the following
+ * properties:
+ *
+ * \code
+ * child_function.prototype.__proto__ == function.prototype;
+ * child_instance.instance_accessor calls 'InstanceAccessorCallback'
+ * child_instance.instance_property == 3;
+ * \endcode
+ *
+ * The additional 'c_function' parameter refers to a fast API call, which
+ * must not trigger GC or JavaScript execution, or call into V8 in other
+ * ways. For more information on how to define them, see
+ * include/v8-fast-api-calls.h. Please note that this feature is still
+ * experimental.
+ */
+class V8_EXPORT FunctionTemplate : public Template {
+ public:
+ /** Creates a function template.*/
+ static Local<FunctionTemplate> New(
+ Isolate* isolate, FunctionCallback callback = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const CFunction* c_function = nullptr, uint16_t instance_type = 0,
+ uint16_t allowed_receiver_instance_type_range_start = 0,
+ uint16_t allowed_receiver_instance_type_range_end = 0);
+
+ /** Creates a function template for multiple overloaded fast API calls.*/
+ static Local<FunctionTemplate> NewWithCFunctionOverloads(
+ Isolate* isolate, FunctionCallback callback = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const MemorySpan<const CFunction>& c_function_overloads = {});
+
+ /**
+ * Creates a function template backed/cached by a private property.
+ */
+ static Local<FunctionTemplate> NewWithCache(
+ Isolate* isolate, FunctionCallback callback,
+ Local<Private> cache_property, Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+
+ /** Returns the unique function instance in the current execution context.*/
+ V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
+ Local<Context> context);
+
+ /**
+ * Similar to Context::NewRemoteContext, this creates an instance that
+ * isn't backed by an actual object.
+ *
+ * The InstanceTemplate of this FunctionTemplate must have access checks with
+ * handlers installed.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewRemoteInstance();
+
+ /**
+ * Set the call-handler callback for a FunctionTemplate. This
+ * callback is called whenever the function created from this
+ * FunctionTemplate is called. The 'c_function' represents a fast
+ * API call, see the comment above the class declaration.
+ */
+ void SetCallHandler(
+ FunctionCallback callback, Local<Value> data = Local<Value>(),
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const MemorySpan<const CFunction>& c_function_overloads = {});
+
+ /** Set the predefined length property for the FunctionTemplate. */
+ void SetLength(int length);
+
+ /** Get the InstanceTemplate. */
+ Local<ObjectTemplate> InstanceTemplate();
+
+ /**
+ * Causes the function template to inherit from a parent function template.
+ * This means the function's prototype.__proto__ is set to the parent
+ * function's prototype.
+ **/
+ void Inherit(Local<FunctionTemplate> parent);
+
+ /**
+ * A PrototypeTemplate is the template used to create the prototype object
+ * of the function created by this template.
+ */
+ Local<ObjectTemplate> PrototypeTemplate();
+
+ /**
+ * A PrototypeProviderTemplate is another function template whose prototype
+ * property is used for this template. This is mutually exclusive with setting
+ * a prototype template indirectly by calling PrototypeTemplate() or using
+ * Inherit().
+ **/
+ void SetPrototypeProviderTemplate(Local<FunctionTemplate> prototype_provider);
+
+ /**
+ * Set the class name of the FunctionTemplate. This is used for
+ * printing objects created with the function created from the
+ * FunctionTemplate as its constructor.
+ */
+ void SetClassName(Local<String> name);
+
+ /**
+ * When set to true, no access check will be performed on the receiver of a
+ * function call. Currently defaults to true, but this is subject to change.
+ */
+ void SetAcceptAnyReceiver(bool value);
+
+ /**
+ * Sets the ReadOnly flag in the attributes of the 'prototype' property
+ * of functions created from this FunctionTemplate to true.
+ */
+ void ReadOnlyPrototype();
+
+ /**
+ * Removes the prototype property from functions created from this
+ * FunctionTemplate.
+ */
+ void RemovePrototype();
+
+ /**
+ * Returns true if the given object is an instance of this function
+ * template.
+ */
+ bool HasInstance(Local<Value> object);
+
+ /**
+ * Returns true if the given value is an API object that was constructed by an
+ * instance of this function template (without checking for inheriting
+ * function templates).
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
+
+ V8_INLINE static FunctionTemplate* Cast(Data* data);
+
+ private:
+ FunctionTemplate();
+
+ static void CheckCast(Data* that);
+ friend class Context;
+ friend class ObjectTemplate;
+};
+
+/**
+ * Configuration flags for v8::NamedPropertyHandlerConfiguration or
+ * v8::IndexedPropertyHandlerConfiguration.
+ */
+enum class PropertyHandlerFlags {
+ /**
+ * None.
+ */
+ kNone = 0,
+
+ /**
+ * See ALL_CAN_READ above.
+ */
+ kAllCanRead = 1,
+
+ /**
+ * Will not call into interceptor for properties on the receiver or prototype
+ * chain, i.e., only call into interceptor for properties that do not exist.
+ * Currently only valid for named interceptors.
+ */
+ kNonMasking = 1 << 1,
+
+ /**
+ * Will not call into interceptor for symbol lookup. Only meaningful for
+ * named interceptors.
+ */
+ kOnlyInterceptStrings = 1 << 2,
+
+ /**
+ * The getter, query, enumerator callbacks do not produce side effects.
+ */
+ kHasNoSideEffect = 1 << 3,
+};
+
+struct NamedPropertyHandlerConfiguration {
+ NamedPropertyHandlerConfiguration(
+ GenericNamedPropertyGetterCallback getter,
+ GenericNamedPropertySetterCallback setter,
+ GenericNamedPropertyQueryCallback query,
+ GenericNamedPropertyDeleterCallback deleter,
+ GenericNamedPropertyEnumeratorCallback enumerator,
+ GenericNamedPropertyDefinerCallback definer,
+ GenericNamedPropertyDescriptorCallback descriptor,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ NamedPropertyHandlerConfiguration(
+ /** Note: getter is required */
+ GenericNamedPropertyGetterCallback getter = nullptr,
+ GenericNamedPropertySetterCallback setter = nullptr,
+ GenericNamedPropertyQueryCallback query = nullptr,
+ GenericNamedPropertyDeleterCallback deleter = nullptr,
+ GenericNamedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(nullptr),
+ descriptor(nullptr),
+ data(data),
+ flags(flags) {}
+
+ NamedPropertyHandlerConfiguration(
+ GenericNamedPropertyGetterCallback getter,
+ GenericNamedPropertySetterCallback setter,
+ GenericNamedPropertyDescriptorCallback descriptor,
+ GenericNamedPropertyDeleterCallback deleter,
+ GenericNamedPropertyEnumeratorCallback enumerator,
+ GenericNamedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(nullptr),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ GenericNamedPropertyGetterCallback getter;
+ GenericNamedPropertySetterCallback setter;
+ GenericNamedPropertyQueryCallback query;
+ GenericNamedPropertyDeleterCallback deleter;
+ GenericNamedPropertyEnumeratorCallback enumerator;
+ GenericNamedPropertyDefinerCallback definer;
+ GenericNamedPropertyDescriptorCallback descriptor;
+ Local<Value> data;
+ PropertyHandlerFlags flags;
+};
+
+struct IndexedPropertyHandlerConfiguration {
+ IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback deleter,
+ IndexedPropertyEnumeratorCallback enumerator,
+ IndexedPropertyDefinerCallback definer,
+ IndexedPropertyDescriptorCallback descriptor,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyHandlerConfiguration(
+ /** Note: getter is required */
+ IndexedPropertyGetterCallback getter = nullptr,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(nullptr),
+ descriptor(nullptr),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyDescriptorCallback descriptor,
+ IndexedPropertyDeleterCallback deleter,
+ IndexedPropertyEnumeratorCallback enumerator,
+ IndexedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(nullptr),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyGetterCallback getter;
+ IndexedPropertySetterCallback setter;
+ IndexedPropertyQueryCallback query;
+ IndexedPropertyDeleterCallback deleter;
+ IndexedPropertyEnumeratorCallback enumerator;
+ IndexedPropertyDefinerCallback definer;
+ IndexedPropertyDescriptorCallback descriptor;
+ Local<Value> data;
+ PropertyHandlerFlags flags;
+};
+
+/**
+ * An ObjectTemplate is used to create objects at runtime.
+ *
+ * Properties added to an ObjectTemplate are added to each object
+ * created from the ObjectTemplate.
+ */
+class V8_EXPORT ObjectTemplate : public Template {
+ public:
+ /** Creates an ObjectTemplate. */
+ static Local<ObjectTemplate> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
+
+ /** Creates a new instance of this template.*/
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
+
+ /**
+ * Sets an accessor on the object template.
+ *
+ * Whenever the property with the given name is accessed on objects
+ * created from this ObjectTemplate the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+ * field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetAccessor(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ void SetAccessor(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
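+
+ // A minimal usage sketch, assuming embedder-defined callbacks `XGetter` and
+ // `XSetter` matching AccessorNameGetterCallback/AccessorNameSetterCallback:
+ //
+ //   v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ //   templ->SetAccessor(v8::String::NewFromUtf8Literal(isolate, "x"),
+ //                      XGetter, XSetter);
+ //   // Reads and writes of "x" on objects created from `templ` now go
+ //   // through the callbacks instead of a plain data property.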
+
+ /**
+ * Sets a named property handler on the object template.
+ *
+ * Whenever a property whose name is a string or a symbol is accessed on
+ * objects created from this object template, the provided callback is
+ * invoked instead of accessing the property directly on the JavaScript
+ * object.
+ *
+ * @param configuration The NamedPropertyHandlerConfiguration that defines the
+ * callbacks to invoke when accessing a property.
+ */
+ void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
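+
+ // A minimal interceptor sketch, assuming an embedder-defined `NamedGetter`
+ // with the GenericNamedPropertyGetterCallback signature:
+ //
+ //   v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ //   templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NamedGetter));
+ //   // Named property access on instances of `templ` is now routed through
+ //   // NamedGetter before the object's own properties are consulted.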
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * \param getter The callback to invoke when getting a property.
+ * \param setter The callback to invoke when setting a property.
+ * \param query The callback to invoke to check if an object has a property.
+ * \param deleter The callback to invoke when deleting a property.
+ * \param enumerator The callback to invoke to enumerate all the indexed
+ * properties of an object.
+ * \param data A piece of data that will be passed to the callbacks
+ * whenever they are invoked.
+ */
+ // TODO(dcarney): deprecate
+ void SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>()) {
+ SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
+ deleter, enumerator, data));
+ }
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * @param configuration The IndexedPropertyHandlerConfiguration that defines
+ * the callbacks to invoke when accessing a property.
+ */
+ void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
+
+ /**
+ * Sets the callback to be used when calling instances created from
+ * this template as a function. If no callback is set, instances
+ * behave like normal JavaScript objects that cannot be called as a
+ * function.
+ */
+ void SetCallAsFunctionHandler(FunctionCallback callback,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Mark object instances of the template as undetectable.
+ *
+ * In many ways, undetectable objects behave as though they are not
+ * there. They behave like 'undefined' in conditionals and when
+ * printed. However, properties can be accessed and called as on
+ * normal objects.
+ */
+ void MarkAsUndetectable();
+
+ /**
+ * Sets access check callback on the object template and enables access
+ * checks.
+ *
+ * When accessing properties on instances of this object template,
+ * the access check callback will be called to determine whether or
+ * not to allow cross-context access to the properties.
+ */
+ void SetAccessCheckCallback(AccessCheckCallback callback,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Like SetAccessCheckCallback but invokes an interceptor on failed access
+ * checks instead of looking up all-can-read properties. You can only use
+ * either this method or SetAccessCheckCallback, but not both at the same
+ * time.
+ */
+ void SetAccessCheckCallbackAndHandler(
+ AccessCheckCallback callback,
+ const NamedPropertyHandlerConfiguration& named_handler,
+ const IndexedPropertyHandlerConfiguration& indexed_handler,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Gets the number of internal fields for objects generated from
+ * this template.
+ */
+ int InternalFieldCount() const;
+
+ /**
+ * Sets the number of internal fields for objects generated from
+ * this template.
+ */
+ void SetInternalFieldCount(int value);
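+
+ // A common wrapping sketch, assuming `native` is a pointer-aligned,
+ // embedder-owned object that outlives the JavaScript wrapper:
+ //
+ //   templ->SetInternalFieldCount(1);
+ //   v8::Local<v8::Object> wrapper =
+ //       templ->NewInstance(context).ToLocalChecked();
+ //   wrapper->SetAlignedPointerInInternalField(0, native);
+ //   // Callbacks can later recover the pointer with
+ //   // wrapper->GetAlignedPointerFromInternalField(0).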
+
+ /**
+ * Returns true if the object will be an immutable prototype exotic object.
+ */
+ bool IsImmutableProto() const;
+
+ /**
+ * Makes objects created from this ObjectTemplate immutable prototype exotic
+ * objects, with an immutable __proto__.
+ */
+ void SetImmutableProto();
+
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+ * This API allows marking (and querying) objects as "code like", which causes
+ * them to be treated like Strings in the context of eval and the Function
+ * constructor.
+ *
+ * Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
+ */
+ void SetCodeLike();
+ bool IsCodeLike() const;
+
+ V8_INLINE static ObjectTemplate* Cast(Data* data);
+
+ private:
+ ObjectTemplate();
+ static Local<ObjectTemplate> New(internal::Isolate* isolate,
+ Local<FunctionTemplate> constructor);
+ static void CheckCast(Data* that);
+ friend class FunctionTemplate;
+};
+
+/**
+ * A Signature specifies which receiver is valid for a function.
+ *
+ * A receiver matches a given signature if the receiver (or any of its
+ * hidden prototypes) was created from the signature's FunctionTemplate, or
+ * from a FunctionTemplate that inherits directly or indirectly from the
+ * signature's FunctionTemplate.
+ */
+class V8_EXPORT Signature : public Data {
+ public:
+ static Local<Signature> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+
+ V8_INLINE static Signature* Cast(Data* data);
+
+ private:
+ Signature();
+
+ static void CheckCast(Data* that);
+};
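+
+// A minimal sketch of how a Signature is typically used, assuming an
+// embedder-defined `MethodCallback`: a function created with the signature
+// below throws a TypeError when invoked on receivers that were not created
+// from `receiver_templ` (or a template inheriting from it).
+//
+//   Local<FunctionTemplate> receiver_templ = FunctionTemplate::New(isolate);
+//   Local<Signature> sig = Signature::New(isolate, receiver_templ);
+//   Local<FunctionTemplate> method =
+//       FunctionTemplate::New(isolate, MethodCallback, Local<Value>(), sig);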
+
+/**
+ * An AccessorSignature specifies which receivers are valid parameters
+ * to an accessor callback.
+ */
+class V8_EXPORT AccessorSignature : public Data {
+ public:
+ static Local<AccessorSignature> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+
+ V8_INLINE static AccessorSignature* Cast(Data* data);
+
+ private:
+ AccessorSignature();
+
+ static void CheckCast(Data* that);
+};
+
+// --- Implementation ---
+
+void Template::Set(Isolate* isolate, const char* name, Local<Data> value,
+ PropertyAttribute attributes) {
+ Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
+ .ToLocalChecked(),
+ value, attributes);
+}
+
+FunctionTemplate* FunctionTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<FunctionTemplate*>(data);
+}
+
+ObjectTemplate* ObjectTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<ObjectTemplate*>(data);
+}
+
+Signature* Signature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Signature*>(data);
+}
+
+AccessorSignature* AccessorSignature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<AccessorSignature*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TEMPLATE_H_
diff --git a/chromium/v8/include/v8-traced-handle.h b/chromium/v8/include/v8-traced-handle.h
new file mode 100644
index 00000000000..15c9693ecbb
--- /dev/null
+++ b/chromium/v8/include/v8-traced-handle.h
@@ -0,0 +1,605 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TRACED_HANDLE_H_
+#define INCLUDE_V8_TRACED_HANDLE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <atomic>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-weak-callback-info.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Value;
+
+namespace internal {
+class BasicTracedReferenceExtractor;
+} // namespace internal
+
+namespace api_internal {
+V8_EXPORT internal::Address* GlobalizeTracedReference(
+ internal::Isolate* isolate, internal::Address* handle,
+ internal::Address* slot, bool has_destructor);
+V8_EXPORT void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to);
+V8_EXPORT void CopyTracedGlobalReference(const internal::Address* const* from,
+ internal::Address** to);
+V8_EXPORT void DisposeTracedGlobal(internal::Address* global_handle);
+V8_EXPORT void SetFinalizationCallbackTraced(
+ internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback);
+} // namespace api_internal
+
+/**
+ * Deprecated. Use |TracedReference<T>| instead.
+ */
+template <typename T>
+struct TracedGlobalTrait {};
+
+class TracedReferenceBase {
+ public:
+ /**
+ * Returns true if the reference is empty, i.e., has not been assigned an
+ * object.
+ */
+ bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
+ * true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+ * Construct a Local<Value> from this handle.
+ */
+ V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const {
+ if (IsEmpty()) return Local<Value>();
+ return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
+ }
+
+ /**
+ * Returns true if this TracedReference is empty, i.e., has not been
+ * assigned an object. This version of IsEmpty is thread-safe.
+ */
+ bool IsEmptyThreadSafe() const {
+ return this->GetSlotThreadSafe() == nullptr;
+ }
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ protected:
+ /**
+ * Update this reference in a thread-safe way.
+ */
+ void SetSlotThreadSafe(void* new_val) {
+ reinterpret_cast<std::atomic<void*>*>(&val_)->store(
+ new_val, std::memory_order_relaxed);
+ }
+
+ /**
+ * Get this reference in a thread-safe way.
+ */
+ const void* GetSlotThreadSafe() const {
+ return reinterpret_cast<std::atomic<const void*> const*>(&val_)->load(
+ std::memory_order_relaxed);
+ }
+
+ V8_EXPORT void CheckValue() const;
+
+ // val_ points to a GlobalHandles node.
+ internal::Address* val_ = nullptr;
+
+ friend class internal::BasicTracedReferenceExtractor;
+ template <typename F>
+ friend class Local;
+ template <typename U>
+ friend bool operator==(const TracedReferenceBase&, const Local<U>&);
+ friend bool operator==(const TracedReferenceBase&,
+ const TracedReferenceBase&);
+};
+
+/**
+ * A traced handle with copy and move semantics. The handle is to be used
+ * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
+ * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
+ *
+ * The exact semantics are:
+ * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
+ * - Non-tracing garbage collections consult
+ * |v8::EmbedderRootsHandler::IsRoot()| to decide whether the handle should
+ * be treated as a root or not.
+ *
+ * Note that the base class cannot be instantiated itself. Choose from
+ * - TracedGlobal
+ * - TracedReference
+ */
+template <typename T>
+class BasicTracedReference : public TracedReferenceBase {
+ public:
+ /**
+ * Construct a Local<T> from this handle.
+ */
+ Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+
+ template <class S>
+ V8_INLINE BasicTracedReference<S>& As() const {
+ return reinterpret_cast<BasicTracedReference<S>&>(
+ const_cast<BasicTracedReference<T>&>(*this));
+ }
+
+ T* operator->() const {
+#ifdef V8_ENABLE_CHECKS
+ CheckValue();
+#endif // V8_ENABLE_CHECKS
+ return reinterpret_cast<T*>(val_);
+ }
+ T* operator*() const {
+#ifdef V8_ENABLE_CHECKS
+ CheckValue();
+#endif // V8_ENABLE_CHECKS
+ return reinterpret_cast<T*>(val_);
+ }
+
+ private:
+ enum DestructionMode { kWithDestructor, kWithoutDestructor };
+
+ /**
+ * An empty BasicTracedReference without storage cell.
+ */
+ BasicTracedReference() = default;
+
+ V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot,
+ DestructionMode destruction_mode);
+
+ friend class EmbedderHeapTracer;
+ template <typename F>
+ friend class Local;
+ friend class Object;
+ template <typename F>
+ friend class TracedGlobal;
+ template <typename F>
+ friend class TracedReference;
+ template <typename F>
+ friend class BasicTracedReference;
+ template <typename F>
+ friend class ReturnValue;
+};
+
+/**
+ * A traced handle with a destructor that clears the handle. For more details
+ * see
+ * BasicTracedReference.
+ */
+template <typename T>
+class TracedGlobal : public BasicTracedReference<T> {
+ public:
+ using BasicTracedReference<T>::Reset;
+
+ /**
+ * Destructor resetting the handle.
+ */
+ ~TracedGlobal() { this->Reset(); }
+
+ /**
+ * An empty TracedGlobal without storage cell.
+ */
+ TracedGlobal() : BasicTracedReference<T>() {}
+
+ /**
+ * Construct a TracedGlobal from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
+ template <class S>
+ TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ BasicTracedReference<T>::kWithDestructor);
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal(TracedGlobal&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Move constructor initializing TracedGlobal from an existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedGlobal(TracedGlobal<S>&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Copy constructor initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal(const TracedGlobal& other) {
+ // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+ * Copy constructor initializing TracedGlobal from an existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
+ // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+ * Move assignment operator initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs);
+
+ /**
+ * Move assignment operator initializing TracedGlobal from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedGlobal from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedGlobal from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ template <class S>
+ V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
+
+ /**
+ * If non-empty, destroy the underlying storage cell and create a new one with
+ * the contents of |other| if |other| is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ template <class S>
+ V8_INLINE TracedGlobal<S>& As() const {
+ return reinterpret_cast<TracedGlobal<S>&>(
+ const_cast<TracedGlobal<T>&>(*this));
+ }
+
+ /**
+ * Adds a finalization callback to the handle. The type of this callback is
+ * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
+ * parameter and the first two internal fields of the object.
+ *
+ * The callback is then supposed to reset the handle in the callback. No
+ * further V8 API may be called in this callback. In case additional work
+ * involving V8 needs to be done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ V8_INLINE void SetFinalizationCallback(
+ void* parameter, WeakCallbackInfo<void>::Callback callback);
+};
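+
+// A minimal finalization sketch, assuming `traced` is an embedder-owned
+// TracedGlobal<v8::Object> registered during setup:
+//
+//   void OnReclaimed(const v8::WeakCallbackInfo<void>& info) {
+//     // Only resetting the handle is allowed here; no other V8 API calls.
+//     static_cast<v8::TracedGlobal<v8::Object>*>(info.GetParameter())->Reset();
+//   }
+//
+//   traced.SetFinalizationCallback(&traced, OnReclaimed);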
+
+/**
+ * A traced handle without a destructor that clears the handle. The embedder
+ * needs
+ * to ensure that the handle is not accessed once the V8 object has been
+ * reclaimed. This can happen when the handle is not passed through the
+ * EmbedderHeapTracer. For more details see BasicTracedReference.
+ *
+ * The reference assumes the embedder has precise knowledge about references at
+ * all times. In case V8 needs to separately handle on-stack references, the
+ * embedder is required to set the stack start through
+ * |EmbedderHeapTracer::SetStackStart|.
+ */
+template <typename T>
+class TracedReference : public BasicTracedReference<T> {
+ public:
+ using BasicTracedReference<T>::Reset;
+
+ /**
+ * An empty TracedReference without storage cell.
+ */
+ TracedReference() : BasicTracedReference<T>() {}
+
+ /**
+ * Construct a TracedReference from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
+ template <class S>
+ TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ BasicTracedReference<T>::kWithoutDestructor);
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ V8_INLINE TracedReference(TracedReference&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedReference(TracedReference<S>&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Copy constructor initializing TracedReference from an
+ * existing one.
+ */
+ V8_INLINE TracedReference(const TracedReference& other) {
+ // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+ * Copy constructor initializing TracedReference from an
+ * existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedReference(const TracedReference<S>& other) {
+ // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+ * Move assignment operator initializing TracedReference from an existing one.
+ */
+ V8_INLINE TracedReference& operator=(TracedReference&& rhs);
+
+ /**
+ * Move assignment operator initializing TracedReference from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedReference from an existing one.
+ */
+ V8_INLINE TracedReference& operator=(const TracedReference& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedReference from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
+
+ /**
+ * If non-empty, destroy the underlying storage cell and create a new one with
+ * the contents of |other| if |other| is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ template <class S>
+ V8_INLINE TracedReference<S>& As() const {
+ return reinterpret_cast<TracedReference<S>&>(
+ const_cast<TracedReference<T>&>(*this));
+ }
+};
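+
+// A minimal usage sketch, assuming a live Isolate and an embedder-created
+// Local<v8::Object> `obj`:
+//
+//   v8::TracedReference<v8::Object> ref(isolate, obj);
+//   // Later, on the main thread:
+//   v8::Local<v8::Object> local = ref.Get(isolate);
+//   ref.Reset();  // Drops the storage cell; IsEmpty() is true afterwards.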
+
+// --- Implementation ---
+template <class T>
+internal::Address* BasicTracedReference<T>::New(
+ Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) {
+ if (that == nullptr) return nullptr;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
+ return api_internal::GlobalizeTracedReference(
+ reinterpret_cast<internal::Isolate*>(isolate), p,
+ reinterpret_cast<internal::Address*>(slot),
+ destruction_mode == kWithDestructor);
+}
+
+void TracedReferenceBase::Reset() {
+ if (IsEmpty()) return;
+ api_internal::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
+ SetSlotThreadSafe(nullptr);
+}
+
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return rhs == lhs;
+}
+
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(rhs == lhs);
+}
+
+template <class T>
+template <class S>
+void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = this->New(isolate, other.val_, &this->val_,
+ BasicTracedReference<T>::kWithDestructor);
+}
+
+template <class T>
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = std::move(rhs.template As<T>());
+ return *this;
+}
+
+template <class T>
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = rhs.template As<T>();
+ return *this;
+}
+
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) {
+ if (this != &rhs) {
+ api_internal::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ return *this;
+}
+
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ api_internal::CopyTracedGlobalReference(
+ reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ }
+ return *this;
+}
+
+template <class T>
+template <class S>
+void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ this->Reset();
+ if (other.IsEmpty()) return;
+ this->SetSlotThreadSafe(
+ this->New(isolate, other.val_, &this->val_,
+ BasicTracedReference<T>::kWithoutDestructor));
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = std::move(rhs.template As<T>());
+ return *this;
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(
+ const TracedReference<S>& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = rhs.template As<T>();
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference&& rhs) {
+ if (this != &rhs) {
+ api_internal::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ api_internal::CopyTracedGlobalReference(
+ reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ }
+ return *this;
+}
+
+void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
+ using I = internal::Internals;
+ if (IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+uint16_t TracedReferenceBase::WrapperClassId() const {
+ using I = internal::Internals;
+ if (IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
+template <class T>
+void TracedGlobal<T>::SetFinalizationCallback(
+ void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
+ api_internal::SetFinalizationCallbackTraced(
+ reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TRACED_HANDLE_H_
diff --git a/chromium/v8/include/v8-typed-array.h b/chromium/v8/include/v8-typed-array.h
new file mode 100644
index 00000000000..483b4f772ff
--- /dev/null
+++ b/chromium/v8/include/v8-typed-array.h
@@ -0,0 +1,282 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TYPED_ARRAY_H_
+#define INCLUDE_V8_TYPED_ARRAY_H_
+
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class SharedArrayBuffer;
+
+/**
+ * A base class for an instance of TypedArray series of constructors
+ * (ES6 draft 15.13.6).
+ */
+class V8_EXPORT TypedArray : public ArrayBufferView {
+ public:
+ /**
+ * The largest typed array size that can be constructed using New.
+ */
+ static constexpr size_t kMaxLength =
+ internal::kApiSystemPointerSize == 4
+ ? internal::kSmiMaxValue
+ : static_cast<size_t>(uint64_t{1} << 32);
+
+ /**
+ * Number of elements in this typed array
+ * (e.g. for Int16Array, |ByteLength|/2).
+ */
+ size_t Length();
+
+ V8_INLINE static TypedArray* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<TypedArray*>(value);
+ }
+
+ private:
+ TypedArray();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Uint8Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint8Array : public TypedArray {
+ public:
+ static Local<Uint8Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint8Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint8Array*>(value);
+ }
+
+ private:
+ Uint8Array();
+ static void CheckCast(Value* obj);
+};
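+
+// A minimal construction sketch, assuming a live Isolate:
+//
+//   v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 16);
+//   v8::Local<v8::Uint8Array> view = v8::Uint8Array::New(buffer, 0, 16);
+//   // `view` aliases all 16 bytes of `buffer`, starting at byte offset 0.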
+
+/**
+ * An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint8ClampedArray : public TypedArray {
+ public:
+ static Local<Uint8ClampedArray> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint8ClampedArray> New(
+ Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,
+ size_t length);
+ V8_INLINE static Uint8ClampedArray* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint8ClampedArray*>(value);
+ }
+
+ private:
+ Uint8ClampedArray();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Int8Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int8Array : public TypedArray {
+ public:
+ static Local<Int8Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int8Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int8Array*>(value);
+ }
+
+ private:
+ Int8Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Uint16Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint16Array : public TypedArray {
+ public:
+ static Local<Uint16Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint16Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint16Array*>(value);
+ }
+
+ private:
+ Uint16Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Int16Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int16Array : public TypedArray {
+ public:
+ static Local<Int16Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int16Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int16Array*>(value);
+ }
+
+ private:
+ Int16Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Uint32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint32Array : public TypedArray {
+ public:
+ static Local<Uint32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint32Array*>(value);
+ }
+
+ private:
+ Uint32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Int32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int32Array : public TypedArray {
+ public:
+ static Local<Int32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int32Array*>(value);
+ }
+
+ private:
+ Int32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Float32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Float32Array : public TypedArray {
+ public:
+ static Local<Float32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Float32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float32Array*>(value);
+ }
+
+ private:
+ Float32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of Float64Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Float64Array : public TypedArray {
+ public:
+ static Local<Float64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Float64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float64Array*>(value);
+ }
+
+ private:
+ Float64Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of BigInt64Array constructor.
+ */
+class V8_EXPORT BigInt64Array : public TypedArray {
+ public:
+ static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigInt64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigInt64Array*>(value);
+ }
+
+ private:
+ BigInt64Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of BigUint64Array constructor.
+ */
+class V8_EXPORT BigUint64Array : public TypedArray {
+ public:
+ static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigUint64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigUint64Array*>(value);
+ }
+
+ private:
+ BigUint64Array();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TYPED_ARRAY_H_
diff --git a/chromium/v8/include/v8-unwinder-state.h b/chromium/v8/include/v8-unwinder-state.h
index 00f8b8b176d..a30f7325f48 100644
--- a/chromium/v8/include/v8-unwinder-state.h
+++ b/chromium/v8/include/v8-unwinder-state.h
@@ -17,9 +17,10 @@ struct CalleeSavedRegisters {
void* arm_r9;
void* arm_r10;
};
-#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
+#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_LOONG64
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8
diff --git a/chromium/v8/include/v8-unwinder.h b/chromium/v8/include/v8-unwinder.h
new file mode 100644
index 00000000000..22a5cd713d4
--- /dev/null
+++ b/chromium/v8/include/v8-unwinder.h
@@ -0,0 +1,129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_UNWINDER_H_
+#define INCLUDE_V8_UNWINDER_H_
+
+#include <memory>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+// Holds the callee saved registers needed for the stack unwinder. It is the
+// empty struct if no registers are required. Implemented in
+// include/v8-unwinder-state.h.
+struct CalleeSavedRegisters;
+
+// A RegisterState represents the current state of registers used
+// by the sampling profiler API.
+struct V8_EXPORT RegisterState {
+ RegisterState();
+ ~RegisterState();
+ RegisterState(const RegisterState& other);
+ RegisterState& operator=(const RegisterState& other);
+
+ void* pc; // Instruction pointer.
+ void* sp; // Stack pointer.
+ void* fp; // Frame pointer.
+ void* lr; // Link register (or nullptr on platforms without a link register).
+ // Callee saved registers (or null if no callee saved registers were stored)
+ std::unique_ptr<CalleeSavedRegisters> callee_saved;
+};
+
+// A StateTag represents a possible state of the VM.
+enum StateTag {
+ JS,
+ GC,
+ PARSER,
+ BYTECODE_COMPILER,
+ COMPILER,
+ OTHER,
+ EXTERNAL,
+ ATOMICS_WAIT,
+ IDLE
+};
+
+// The output structure filled up by GetStackSample API function.
+struct SampleInfo {
+ size_t frames_count; // Number of frames collected.
+ StateTag vm_state; // Current VM state.
+ void* external_callback_entry; // External callback address if VM is
+ // executing an external callback.
+ void* context; // Incumbent native context address.
+};
+
+struct MemoryRange {
+ const void* start = nullptr;
+ size_t length_in_bytes = 0;
+};
+
+struct JSEntryStub {
+ MemoryRange code;
+};
+
+struct JSEntryStubs {
+ JSEntryStub js_entry_stub;
+ JSEntryStub js_construct_entry_stub;
+ JSEntryStub js_run_microtasks_entry_stub;
+};
+
+/**
+ * Various helpers for skipping over V8 frames in a given stack.
+ *
+ * The unwinder API is only supported on the x64, ARM64 and ARM32 architectures.
+ */
+class V8_EXPORT Unwinder {
+ public:
+ /**
+ * Attempt to unwind the stack to the most recent C++ frame. This function is
+ * signal-safe and does not access any V8 state and thus doesn't require an
+ * Isolate.
+ *
+ * The unwinder needs to know the location of the JS Entry Stub (a piece of
+ * code that is run when C++ code calls into generated JS code). This is used
+ * for edge cases where the current frame is being constructed or torn down
+ * when the stack sample occurs.
+ *
+ * The unwinder also needs the virtual memory range of all possible V8 code
+ * objects. There are two ranges required - the heap code range and the range
+ * for code embedded in the binary.
+ *
+ * Available on x64, ARM64 and ARM32.
+ *
+ * \param code_pages A list of all of the ranges in which V8 has allocated
+ * executable code. The caller should obtain this list by calling
+ * Isolate::CopyCodePages() during the same interrupt/thread suspension that
+ * captures the stack.
+ * \param register_state The current registers. This is an in-out param that
+ * will be overwritten with the register values after unwinding, on success.
+ * \param stack_base The resulting stack pointer and frame pointer values are
+ * bounds-checked against the stack_base and the original stack pointer value
+ * to ensure that they are valid locations in the given stack. If these values
+ * or any intermediate frame pointer values used during unwinding are ever out
+ * of these bounds, unwinding will fail.
+ *
+ * \return True on success.
+ */
+ static bool TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
+ size_t code_pages_length,
+ const MemoryRange* code_pages,
+ RegisterState* register_state,
+ const void* stack_base);
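+
+ // A sampling-profiler-style sketch, assuming `regs` was captured for the
+ // interrupted thread, `stack_base` is that thread's stack base, and that
+ // Isolate::GetJSEntryStubs()/CopyCodePages() (v8-isolate.h) are used to
+ // collect the inputs; the capacity of 256 is an arbitrary illustration.
+ //
+ //   v8::JSEntryStubs stubs = isolate->GetJSEntryStubs();
+ //   v8::MemoryRange code_pages[256];
+ //   size_t n = isolate->CopyCodePages(256, code_pages);
+ //   bool unwound = v8::Unwinder::TryUnwindV8Frames(stubs, n, code_pages,
+ //                                                  &regs, stack_base);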
+
+ /**
+ * Whether the PC is within the V8 code range represented by code_pages.
+ *
+ * If this returns false, then calling TryUnwindV8Frames() with the same PC
+ * and code_pages will always fail. If it returns true, then unwinding may
+ * (but not necessarily) be successful.
+ *
+ * Available on x64, ARM64 and ARM32.
+ */
+ static bool PCIsInV8(size_t code_pages_length, const MemoryRange* code_pages,
+ void* pc);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_UNWINDER_H_
diff --git a/chromium/v8/include/v8-util.h b/chromium/v8/include/v8-util.h
index 8e4d66153d1..c54418aa251 100644
--- a/chromium/v8/include/v8-util.h
+++ b/chromium/v8/include/v8-util.h
@@ -5,11 +5,14 @@
#ifndef V8_UTIL_H_
#define V8_UTIL_H_
-#include "v8.h" // NOLINT(build/include_directory)
#include <assert.h>
+
#include <map>
#include <vector>
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+
/**
* Support for Persistent containers.
*
@@ -19,6 +22,9 @@
*/
namespace v8 {
+template <typename K, typename V, typename Traits>
+class GlobalValueMap;
+
typedef uintptr_t PersistentContainerValue;
static const uintptr_t kPersistentContainerNotFound = 0;
enum PersistentContainerCallbackType {
diff --git a/chromium/v8/include/v8-value-serializer.h b/chromium/v8/include/v8-value-serializer.h
new file mode 100644
index 00000000000..574567bd5a7
--- /dev/null
+++ b/chromium/v8/include/v8-value-serializer.h
@@ -0,0 +1,249 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_VALUE_SERIALIZER_H_
+#define INCLUDE_V8_VALUE_SERIALIZER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class ArrayBuffer;
+class Isolate;
+class Object;
+class SharedArrayBuffer;
+class String;
+class WasmModuleObject;
+class Value;
+
+namespace internal {
+struct ScriptStreamingData;
+} // namespace internal
+
+/**
+ * Value serialization compatible with the HTML structured clone algorithm.
+ * The format is backward-compatible (i.e. safe to store to disk).
+ */
+class V8_EXPORT ValueSerializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() = default;
+
+ /**
+ * Handles the case where a DataCloneError would be thrown in the structured
+ * clone spec. Other V8 embedders may throw some other appropriate exception
+ * type.
+ */
+ virtual void ThrowDataCloneError(Local<String> message) = 0;
+
+ /**
+ * The embedder overrides this method to write some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * Nothing<bool>() returned.
+ */
+ virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
+
+ /**
+ * Called when the ValueSerializer is going to serialize a
+ * SharedArrayBuffer object. The embedder must return an ID for the
+ * object, using the same ID if this SharedArrayBuffer has already been
+ * serialized in this buffer. When deserializing, this ID will be passed to
+ * ValueDeserializer::GetSharedArrayBufferFromId as |clone_id|.
+ *
+ * If the object cannot be serialized, an
+ * exception should be thrown and Nothing<uint32_t>() returned.
+ */
+ virtual Maybe<uint32_t> GetSharedArrayBufferId(
+ Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
+
+ virtual Maybe<uint32_t> GetWasmModuleTransferId(
+ Isolate* isolate, Local<WasmModuleObject> module);
+ /**
+ * Allocates memory for the buffer of at least the size provided. The actual
+ * size (which may be greater or equal) is written to |actual_size|. If no
+ * buffer has been allocated yet, nullptr will be provided.
+ *
+ * If the memory cannot be allocated, nullptr should be returned.
+ * |actual_size| will be ignored. It is assumed that |old_buffer| is still
+ * valid in this case and has not been modified.
+ *
+ * The default implementation uses the stdlib's `realloc()` function.
+ */
+ virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
+ size_t* actual_size);
+
+ /**
+ * Frees a buffer allocated with |ReallocateBufferMemory|.
+ *
+ * The default implementation uses the stdlib's `free()` function.
+ */
+ virtual void FreeBufferMemory(void* buffer);
+ };
+
+ explicit ValueSerializer(Isolate* isolate);
+ ValueSerializer(Isolate* isolate, Delegate* delegate);
+ ~ValueSerializer();
+
+ /**
+ * Writes out a header, which includes the format version.
+ */
+ void WriteHeader();
+
+ /**
+ * Serializes a JavaScript value into the buffer.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> WriteValue(Local<Context> context,
+ Local<Value> value);
+
+ /**
+ * Returns the stored data (allocated using the delegate's
+ * ReallocateBufferMemory) and its size. This serializer should not be used
+ * once the buffer is released. The contents are undefined if a previous write
+ * has failed. Ownership of the buffer is transferred to the caller.
+ */
+ V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
+
+ /**
+ * Marks an ArrayBuffer as having its contents transferred out of band.
+ * Pass the corresponding ArrayBuffer in the deserializing context to
+ * ValueDeserializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /**
+ * Indicate whether to treat ArrayBufferView objects as host objects,
+ * i.e. pass them to Delegate::WriteHostObject. This should not be
+ * called when no Delegate was passed.
+ *
+ * The default is not to treat ArrayBufferViews as host objects.
+ */
+ void SetTreatArrayBufferViewsAsHostObjects(bool mode);
+
+ /**
+ * Write raw data in various common formats to the buffer.
+ * Note that integer types are written in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::WriteHostObject.
+ */
+ void WriteUint32(uint32_t value);
+ void WriteUint64(uint64_t value);
+ void WriteDouble(double value);
+ void WriteRawBytes(const void* source, size_t length);
+
+ ValueSerializer(const ValueSerializer&) = delete;
+ void operator=(const ValueSerializer&) = delete;
+
+ private:
+ struct PrivateData;
+ PrivateData* private_;
+};
+
+/**
+ * Deserializes values from data written with ValueSerializer, or a compatible
+ * implementation.
+ */
+class V8_EXPORT ValueDeserializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() = default;
+
+ /**
+ * The embedder overrides this method to read some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * MaybeLocal<Object>() returned.
+ */
+ virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
+
+ /**
+ * Get a WasmModuleObject given a transfer_id previously provided
+ * by ValueSerializer::GetWasmModuleTransferId
+ */
+ virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
+ Isolate* isolate, uint32_t transfer_id);
+
+ /**
+ * Get a SharedArrayBuffer given a clone_id previously provided
+ * by ValueSerializer::GetSharedArrayBufferId
+ */
+ virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
+ Isolate* isolate, uint32_t clone_id);
+ };
+
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size,
+ Delegate* delegate);
+ ~ValueDeserializer();
+
+ /**
+ * Reads and validates a header (including the format version).
+ * May, for example, reject an invalid or unsupported wire format.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
+
+ /**
+ * Deserializes a JavaScript value from the buffer.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> ReadValue(Local<Context> context);
+
+ /**
+ * Accepts the array buffer corresponding to the one passed previously to
+ * ValueSerializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /**
+ * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
+ * The id is not necessarily in the same namespace as unshared ArrayBuffer
+ * objects.
+ */
+ void TransferSharedArrayBuffer(uint32_t id,
+ Local<SharedArrayBuffer> shared_array_buffer);
+
+ /**
+ * Must be called before ReadHeader to enable support for reading the legacy
+ * wire format (i.e., which predates this being shipped).
+ *
+ * Don't use this unless you need to read data written by previous versions of
+ * blink::ScriptValueSerializer.
+ */
+ void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
+
+ /**
+ * Reads the underlying wire format version. Likely mostly to be useful to
+ * legacy code reading old wire format versions. Must be called after
+ * ReadHeader.
+ */
+ uint32_t GetWireFormatVersion() const;
+
+ /**
+ * Reads raw data in various common formats from the buffer.
+ * Note that integer types are read in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::ReadHostObject.
+ */
+ V8_WARN_UNUSED_RESULT bool ReadUint32(uint32_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadUint64(uint64_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
+ V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
+
+ ValueDeserializer(const ValueDeserializer&) = delete;
+ void operator=(const ValueDeserializer&) = delete;
+
+ private:
+ struct PrivateData;
+ PrivateData* private_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_VALUE_SERIALIZER_H_
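
A minimal round-trip sketch of the two classes above, using the default delegate and assuming `isolate`, `context`, and `value` are a live Isolate*, Local<Context>, and Local<Value> with a HandleScope and Context::Scope already entered; error handling is reduced to the bare Maybe checks.

    v8::ValueSerializer serializer(isolate);
    serializer.WriteHeader();
    if (serializer.WriteValue(context, value).FromMaybe(false)) {
      // Ownership of the buffer transfers to the caller; the default delegate
      // allocates it with realloc(), so release it with free().
      std::pair<uint8_t*, size_t> data = serializer.Release();

      v8::ValueDeserializer deserializer(isolate, data.first, data.second);
      v8::Local<v8::Value> result;
      if (deserializer.ReadHeader(context).FromMaybe(false) &&
          deserializer.ReadValue(context).ToLocal(&result)) {
        // `result` is a structured clone of `value`.
      }
      free(data.first);
    }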
diff --git a/chromium/v8/include/v8-value.h b/chromium/v8/include/v8-value.h
new file mode 100644
index 00000000000..adca989e002
--- /dev/null
+++ b/chromium/v8/include/v8-value.h
@@ -0,0 +1,526 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_VALUE_H_
+#define INCLUDE_V8_VALUE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+/**
+ * The v8 JavaScript engine.
+ */
+namespace v8 {
+
+class BigInt;
+class Int32;
+class Integer;
+class Number;
+class Object;
+class String;
+class Uint32;
+
+/**
+ * The superclass of all JavaScript values and objects.
+ */
+class V8_EXPORT Value : public Data {
+ public:
+ /**
+ * Returns true if this value is the undefined value. See ECMA-262
+ * 4.3.10.
+ *
+ * This is equivalent to `value === undefined` in JS.
+ */
+ V8_INLINE bool IsUndefined() const;
+
+ /**
+ * Returns true if this value is the null value. See ECMA-262
+ * 4.3.11.
+ *
+ * This is equivalent to `value === null` in JS.
+ */
+ V8_INLINE bool IsNull() const;
+
+ /**
+ * Returns true if this value is either the null or the undefined value.
+ * See ECMA-262
+ * 4.3.11. and 4.3.12
+ *
+ * This is equivalent to `value == null` in JS.
+ */
+ V8_INLINE bool IsNullOrUndefined() const;
+
+ /**
+ * Returns true if this value is true.
+ *
+ * This is not the same as `BooleanValue()`. The latter performs a
+ * conversion to boolean, i.e. the result of `Boolean(value)` in JS, whereas
+ * this checks `value === true`.
+ */
+ bool IsTrue() const;
+
+ /**
+ * Returns true if this value is false.
+ *
+ * This is not the same as `!BooleanValue()`. The latter performs a
+ * conversion to boolean, i.e. the result of `!Boolean(value)` in JS, whereas
+ * this checks `value === false`.
+ */
+ bool IsFalse() const;
+
+ /**
+ * Returns true if this value is a symbol or a string.
+ *
+ * This is equivalent to
+ * `typeof value === 'string' || typeof value === 'symbol'` in JS.
+ */
+ bool IsName() const;
+
+ /**
+ * Returns true if this value is an instance of the String type.
+ * See ECMA-262 8.4.
+ *
+ * This is equivalent to `typeof value === 'string'` in JS.
+ */
+ V8_INLINE bool IsString() const;
+
+ /**
+ * Returns true if this value is a symbol.
+ *
+ * This is equivalent to `typeof value === 'symbol'` in JS.
+ */
+ bool IsSymbol() const;
+
+ /**
+ * Returns true if this value is a function.
+ *
+ * This is equivalent to `typeof value === 'function'` in JS.
+ */
+ bool IsFunction() const;
+
+ /**
+ * Returns true if this value is an array. Note that it will return false for
+ * a Proxy for an array.
+ */
+ bool IsArray() const;
+
+ /**
+ * Returns true if this value is an object.
+ */
+ bool IsObject() const;
+
+ /**
+ * Returns true if this value is a bigint.
+ *
+ * This is equivalent to `typeof value === 'bigint'` in JS.
+ */
+ bool IsBigInt() const;
+
+ /**
+ * Returns true if this value is boolean.
+ *
+ * This is equivalent to `typeof value === 'boolean'` in JS.
+ */
+ bool IsBoolean() const;
+
+ /**
+ * Returns true if this value is a number.
+ *
+ * This is equivalent to `typeof value === 'number'` in JS.
+ */
+ bool IsNumber() const;
+
+ /**
+ * Returns true if this value is an `External` object.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Returns true if this value is a 32-bit signed integer.
+ */
+ bool IsInt32() const;
+
+ /**
+ * Returns true if this value is a 32-bit unsigned integer.
+ */
+ bool IsUint32() const;
+
+ /**
+ * Returns true if this value is a Date.
+ */
+ bool IsDate() const;
+
+ /**
+ * Returns true if this value is an Arguments object.
+ */
+ bool IsArgumentsObject() const;
+
+ /**
+ * Returns true if this value is a BigInt object.
+ */
+ bool IsBigIntObject() const;
+
+ /**
+ * Returns true if this value is a Boolean object.
+ */
+ bool IsBooleanObject() const;
+
+ /**
+ * Returns true if this value is a Number object.
+ */
+ bool IsNumberObject() const;
+
+ /**
+ * Returns true if this value is a String object.
+ */
+ bool IsStringObject() const;
+
+ /**
+ * Returns true if this value is a Symbol object.
+ */
+ bool IsSymbolObject() const;
+
+ /**
+ * Returns true if this value is a NativeError.
+ */
+ bool IsNativeError() const;
+
+ /**
+ * Returns true if this value is a RegExp.
+ */
+ bool IsRegExp() const;
+
+ /**
+ * Returns true if this value is an async function.
+ */
+ bool IsAsyncFunction() const;
+
+ /**
+ * Returns true if this value is a Generator function.
+ */
+ bool IsGeneratorFunction() const;
+
+ /**
+ * Returns true if this value is a Generator object (iterator).
+ */
+ bool IsGeneratorObject() const;
+
+ /**
+ * Returns true if this value is a Promise.
+ */
+ bool IsPromise() const;
+
+ /**
+ * Returns true if this value is a Map.
+ */
+ bool IsMap() const;
+
+ /**
+ * Returns true if this value is a Set.
+ */
+ bool IsSet() const;
+
+ /**
+ * Returns true if this value is a Map Iterator.
+ */
+ bool IsMapIterator() const;
+
+ /**
+ * Returns true if this value is a Set Iterator.
+ */
+ bool IsSetIterator() const;
+
+ /**
+ * Returns true if this value is a WeakMap.
+ */
+ bool IsWeakMap() const;
+
+ /**
+ * Returns true if this value is a WeakSet.
+ */
+ bool IsWeakSet() const;
+
+ /**
+ * Returns true if this value is an ArrayBuffer.
+ */
+ bool IsArrayBuffer() const;
+
+ /**
+ * Returns true if this value is an ArrayBufferView.
+ */
+ bool IsArrayBufferView() const;
+
+ /**
+ * Returns true if this value is one of the TypedArray types.
+ */
+ bool IsTypedArray() const;
+
+ /**
+ * Returns true if this value is an Uint8Array.
+ */
+ bool IsUint8Array() const;
+
+ /**
+ * Returns true if this value is an Uint8ClampedArray.
+ */
+ bool IsUint8ClampedArray() const;
+
+ /**
+ * Returns true if this value is an Int8Array.
+ */
+ bool IsInt8Array() const;
+
+ /**
+ * Returns true if this value is an Uint16Array.
+ */
+ bool IsUint16Array() const;
+
+ /**
+ * Returns true if this value is an Int16Array.
+ */
+ bool IsInt16Array() const;
+
+ /**
+ * Returns true if this value is an Uint32Array.
+ */
+ bool IsUint32Array() const;
+
+ /**
+ * Returns true if this value is an Int32Array.
+ */
+ bool IsInt32Array() const;
+
+ /**
+ * Returns true if this value is a Float32Array.
+ */
+ bool IsFloat32Array() const;
+
+ /**
+ * Returns true if this value is a Float64Array.
+ */
+ bool IsFloat64Array() const;
+
+ /**
+ * Returns true if this value is a BigInt64Array.
+ */
+ bool IsBigInt64Array() const;
+
+ /**
+ * Returns true if this value is a BigUint64Array.
+ */
+ bool IsBigUint64Array() const;
+
+ /**
+ * Returns true if this value is a DataView.
+ */
+ bool IsDataView() const;
+
+ /**
+ * Returns true if this value is a SharedArrayBuffer.
+ */
+ bool IsSharedArrayBuffer() const;
+
+ /**
+ * Returns true if this value is a JavaScript Proxy.
+ */
+ bool IsProxy() const;
+
+ /**
+ * Returns true if this value is a WasmMemoryObject.
+ */
+ bool IsWasmMemoryObject() const;
+
+ /**
+ * Returns true if this value is a WasmModuleObject.
+ */
+ bool IsWasmModuleObject() const;
+
+ /**
+ * Returns true if the value is a Module Namespace Object.
+ */
+ bool IsModuleNamespaceObject() const;
+
+ /**
+ * Perform the equivalent of `BigInt(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `String(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
+ Local<Context> context) const;
+ /**
+ * Provide a string representation of this value usable for debugging.
+ * This operation has no observable side effects and will succeed
+ * unless e.g. execution is being terminated.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ToDetailString(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Object(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> ToObject(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to an integer. Negative values are rounded up, positive values are rounded
+ * down. NaN is converted to 0. Infinite values yield undefined results.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Integer> ToInteger(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to an unsigned 32-bit integer by performing the steps in
+ * https://tc39.es/ecma262/#sec-touint32.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToUint32(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to a signed 32-bit integer by performing the steps in
+ * https://tc39.es/ecma262/#sec-toint32.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
+
+ /**
+ * Perform the equivalent of `Boolean(value)` in JS. This can never fail.
+ */
+ Local<Boolean> ToBoolean(Isolate* isolate) const;
+
+ /**
+ * Attempts to convert a string to an array index.
+ * Returns an empty handle if the conversion fails.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
+ Local<Context> context) const;
+
+ /** Returns the equivalent of `ToBoolean()->Value()`. */
+ bool BooleanValue(Isolate* isolate) const;
+
+ /** Returns the equivalent of `ToNumber()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
+ /** Returns the equivalent of `ToInteger()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
+ Local<Context> context) const;
+ /** Returns the equivalent of `ToUint32()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<uint32_t> Uint32Value(
+ Local<Context> context) const;
+ /** Returns the equivalent of `ToInt32()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
+
+ /** JS == */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
+ Local<Value> that) const;
+ bool StrictEquals(Local<Value> that) const;
+ bool SameValue(Local<Value> that) const;
+
+ template <class T>
+ V8_INLINE static Value* Cast(T* value) {
+ return static_cast<Value*>(value);
+ }
+
+ Local<String> TypeOf(Isolate*);
+
+ Maybe<bool> InstanceOf(Local<Context> context, Local<Object> object);
+
+ private:
+ V8_INLINE bool QuickIsUndefined() const;
+ V8_INLINE bool QuickIsNull() const;
+ V8_INLINE bool QuickIsNullOrUndefined() const;
+ V8_INLINE bool QuickIsString() const;
+ bool FullIsUndefined() const;
+ bool FullIsNull() const;
+ bool FullIsString() const;
+
+ static void CheckCast(Data* that);
+};
+
+template <>
+V8_INLINE Value* Value::Cast(Data* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Value*>(value);
+}
+
+bool Value::IsUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsUndefined();
+#else
+ return QuickIsUndefined();
+#endif
+}
+
+bool Value::QuickIsUndefined() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
+}
+
+bool Value::IsNull() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsNull();
+#else
+ return QuickIsNull();
+#endif
+}
+
+bool Value::QuickIsNull() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ return (I::GetOddballKind(obj) == I::kNullOddballKind);
+}
+
+bool Value::IsNullOrUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsNull() || FullIsUndefined();
+#else
+ return QuickIsNullOrUndefined();
+#endif
+}
+
+bool Value::QuickIsNullOrUndefined() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ int kind = I::GetOddballKind(obj);
+ return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
+}
+
+bool Value::IsString() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsString();
+#else
+ return QuickIsString();
+#endif
+}
+
+bool Value::QuickIsString() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ return (I::GetInstanceType(obj) < I::kFirstNonstringType);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_VALUE_H_
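
An illustrative sketch of the type checks and conversions declared above, assuming `isolate`, `context`, and a Local<Value> named `val` already exist inside a HandleScope.

    if (val->IsString()) {
      v8::Local<v8::String> str = val.As<v8::String>();  // checked cast
    }

    // Conversions that can run JS (and therefore throw) return MaybeLocal/Maybe:
    v8::Local<v8::String> as_string;
    if (val->ToString(context).ToLocal(&as_string)) {
      // the equivalent of String(val) succeeded
    }
    double number = val->NumberValue(context).FromMaybe(0.0);
    bool truthy = val->BooleanValue(isolate);  // Boolean(val); cannot fail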
diff --git a/chromium/v8/include/v8-version.h b/chromium/v8/include/v8-version.h
index a4ef2015e02..41e81474936 100644
--- a/chromium/v8/include/v8-version.h
+++ b/chromium/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 4
-#define V8_BUILD_NUMBER 146
-#define V8_PATCH_LEVEL 24
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 180
+#define V8_PATCH_LEVEL 23
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/chromium/v8/include/v8-wasm.h b/chromium/v8/include/v8-wasm.h
new file mode 100644
index 00000000000..612ed2fae40
--- /dev/null
+++ b/chromium/v8/include/v8-wasm.h
@@ -0,0 +1,251 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_WASM_H_
+#define INCLUDE_V8_WASM_H_
+
+#include <memory>
+#include <string>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class ArrayBuffer;
+class Promise;
+
+namespace internal {
+namespace wasm {
+class NativeModule;
+class StreamingDecoder;
+} // namespace wasm
+} // namespace internal
+
+/**
+ * An owned byte buffer with associated size.
+ */
+struct OwnedBuffer {
+ std::unique_ptr<const uint8_t[]> buffer;
+ size_t size = 0;
+ OwnedBuffer(std::unique_ptr<const uint8_t[]> buffer, size_t size)
+ : buffer(std::move(buffer)), size(size) {}
+ OwnedBuffer() = default;
+};
+
+// Wrapper around a compiled WebAssembly module, which is potentially shared by
+// different WasmModuleObjects.
+class V8_EXPORT CompiledWasmModule {
+ public:
+ /**
+ * Serialize the compiled module. The serialized data does not include the
+ * wire bytes.
+ */
+ OwnedBuffer Serialize();
+
+ /**
+ * Get the (wasm-encoded) wire bytes that were used to compile this module.
+ */
+ MemorySpan<const uint8_t> GetWireBytesRef();
+
+ const std::string& source_url() const { return source_url_; }
+
+ private:
+ friend class WasmModuleObject;
+ friend class WasmStreaming;
+
+ explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>,
+ const char* source_url, size_t url_length);
+
+ const std::shared_ptr<internal::wasm::NativeModule> native_module_;
+ const std::string source_url_;
+};
+
+// An instance of WebAssembly.Memory.
+class V8_EXPORT WasmMemoryObject : public Object {
+ public:
+ WasmMemoryObject() = delete;
+
+ /**
+ * Returns underlying ArrayBuffer.
+ */
+ Local<ArrayBuffer> Buffer();
+
+ V8_INLINE static WasmMemoryObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmMemoryObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* object);
+};
+
+// An instance of WebAssembly.Module.
+class V8_EXPORT WasmModuleObject : public Object {
+ public:
+ WasmModuleObject() = delete;
+
+ /**
+ * Efficiently re-create a WasmModuleObject, without recompiling, from
+ * a CompiledWasmModule.
+ */
+ static MaybeLocal<WasmModuleObject> FromCompiledModule(
+ Isolate* isolate, const CompiledWasmModule&);
+
+ /**
+ * Get the compiled module for this module object. The compiled module can be
+ * shared by several module objects.
+ */
+ CompiledWasmModule GetCompiledModule();
+
+ V8_INLINE static WasmModuleObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmModuleObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * The V8 interface for WebAssembly streaming compilation. When streaming
+ * compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
+ * such that the embedder can pass the input bytes for streaming compilation to
+ * V8.
+ */
+class V8_EXPORT WasmStreaming final {
+ public:
+ class WasmStreamingImpl;
+
+ /**
+ * Client to receive streaming event notifications.
+ */
+ class Client {
+ public:
+ virtual ~Client() = default;
+ /**
+ * Passes the fully compiled module to the client. This can be used to
+ * implement code caching.
+ */
+ virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0;
+ };
+
+ explicit WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
+
+ ~WasmStreaming();
+
+ /**
+ * Pass a new chunk of bytes to WebAssembly streaming compilation.
+ * The buffer passed into {OnBytesReceived} is owned by the caller.
+ */
+ void OnBytesReceived(const uint8_t* bytes, size_t size);
+
+ /**
+ * {Finish} should be called after all received bytes were passed to
+ * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
+ * does not have to be called after {Abort} has been called already.
+ * If {can_use_compiled_module} is true and {SetCompiledModuleBytes} was
+ * previously called, the compiled module bytes can be used.
+ * If {can_use_compiled_module} is false, the compiled module bytes previously
+ * set by {SetCompiledModuleBytes} should not be used.
+ */
+ void Finish(bool can_use_compiled_module = true);
+
+ /**
+ * Abort streaming compilation. If {exception} has a value, then the promise
+ * associated with streaming compilation is rejected with that value. If
+ * {exception} does not have a value, the promise does not get rejected.
+ */
+ void Abort(MaybeLocal<Value> exception);
+
+ /**
+ * Passes previously compiled module bytes. This must be called before
+ * {OnBytesReceived}, {Finish}, or {Abort}. Returns true if the module bytes
+ * can be used, false otherwise. The buffer passed via {bytes} and {size}
+ * is owned by the caller. If {SetCompiledModuleBytes} returns true, the
+ * buffer must remain valid until either {Finish} or {Abort} completes.
+ * The compiled module bytes should not be used until {Finish(true)} is
+ * called, because they can be invalidated later by {Finish(false)}.
+ */
+ bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size);
+
+ /**
+ * Sets the client object that will receive streaming event notifications.
+ * This must be called before {OnBytesReceived}, {Finish}, or {Abort}.
+ */
+ void SetClient(std::shared_ptr<Client> client);
+
+ /**
+ * Sets the UTF-8 encoded source URL for the {Script} object. This must be
+ * called before {Finish}.
+ */
+ void SetUrl(const char* url, size_t length);
+
+ /**
+ * Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
+ * Since the embedder is on the other side of the API, it cannot unpack the
+ * {Managed} itself.
+ */
+ static std::shared_ptr<WasmStreaming> Unpack(Isolate* isolate,
+ Local<Value> value);
+
+ private:
+ std::unique_ptr<WasmStreamingImpl> impl_;
+};
+
+// TODO(mtrofin): when streaming compilation is done, we can rename this
+// to simply WasmModuleObjectBuilder
+class V8_EXPORT WasmModuleObjectBuilderStreaming final {
+ public:
+ explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
+ /**
+ * The buffer passed into OnBytesReceived is owned by the caller.
+ */
+ void OnBytesReceived(const uint8_t*, size_t size);
+ void Finish();
+ /**
+ * Abort streaming compilation. If {exception} has a value, then the promise
+ * associated with streaming compilation is rejected with that value. If
+ * {exception} does not have a value, the promise does not get rejected.
+ */
+ void Abort(MaybeLocal<Value> exception);
+ Local<Promise> GetPromise();
+
+ ~WasmModuleObjectBuilderStreaming() = default;
+
+ private:
+ WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
+ delete;
+ WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) =
+ default;
+ WasmModuleObjectBuilderStreaming& operator=(
+ const WasmModuleObjectBuilderStreaming&) = delete;
+ WasmModuleObjectBuilderStreaming& operator=(
+ WasmModuleObjectBuilderStreaming&&) = default;
+ Isolate* isolate_ = nullptr;
+
+#if V8_CC_MSVC
+ /**
+ * We don't need the static Copy API, so the default
+ * NonCopyablePersistentTraits would be sufficient, however,
+ * MSVC eagerly instantiates the Copy.
+ * We ensure we don't use Copy, however, by compiling with the
+ * defaults everywhere else.
+ */
+ Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
+#else
+ Persistent<Promise> promise_;
+#endif
+ std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_WASM_H_
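
A sketch of the code-caching flow enabled by WasmStreaming::Client and CompiledWasmModule, assuming the embedder obtains the streaming object through its usual compile-streaming callback; the storage call is a hypothetical placeholder.

    class CacheClient final : public v8::WasmStreaming::Client {
     public:
      void OnModuleCompiled(v8::CompiledWasmModule compiled_module) override {
        // The serialized form does not include the wire bytes, so both are
        // needed to later recreate the module via WasmModuleObject::FromCompiledModule.
        v8::OwnedBuffer serialized = compiled_module.Serialize();
        v8::MemorySpan<const uint8_t> wire = compiled_module.GetWireBytesRef();
        // StoreInCache(serialized, wire);  // hypothetical embedder storage
      }
    };

    // streaming->SetClient(std::make_shared<CacheClient>());  // before OnBytesReceived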
diff --git a/chromium/v8/include/v8-weak-callback-info.h b/chromium/v8/include/v8-weak-callback-info.h
new file mode 100644
index 00000000000..ff3c08238e3
--- /dev/null
+++ b/chromium/v8/include/v8-weak-callback-info.h
@@ -0,0 +1,73 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_WEAK_CALLBACK_INFO_H_
+#define INCLUDE_V8_WEAK_CALLBACK_INFO_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+namespace api_internal {
+V8_EXPORT void InternalFieldOutOfBounds(int index);
+} // namespace api_internal
+
+static const int kInternalFieldsInWeakCallback = 2;
+static const int kEmbedderFieldsInWeakCallback = 2;
+
+template <typename T>
+class WeakCallbackInfo {
+ public:
+ using Callback = void (*)(const WeakCallbackInfo<T>& data);
+
+ WeakCallbackInfo(Isolate* isolate, T* parameter,
+ void* embedder_fields[kEmbedderFieldsInWeakCallback],
+ Callback* callback)
+ : isolate_(isolate), parameter_(parameter), callback_(callback) {
+ for (int i = 0; i < kEmbedderFieldsInWeakCallback; ++i) {
+ embedder_fields_[i] = embedder_fields[i];
+ }
+ }
+
+ V8_INLINE Isolate* GetIsolate() const { return isolate_; }
+ V8_INLINE T* GetParameter() const { return parameter_; }
+ V8_INLINE void* GetInternalField(int index) const;
+
+ // When first called, the embedder MUST Reset() the Global which triggered the
+ // callback. The Global itself is unusable for anything else. No other V8 API
+ // calls may be made in the first callback. Should additional work be
+ // required, the embedder must set a second pass callback, which will be
+ // called after all the initial callbacks are processed.
+ // Calling SetSecondPassCallback on the second pass will immediately crash.
+ void SetSecondPassCallback(Callback callback) const { *callback_ = callback; }
+
+ private:
+ Isolate* isolate_;
+ T* parameter_;
+ Callback* callback_;
+ void* embedder_fields_[kEmbedderFieldsInWeakCallback];
+};
+
+// kParameter will pass a void* parameter back to the callback, kInternalFields
+// will pass the first two internal fields back to the callback, kFinalizer
+// will pass a void* parameter back, but is invoked before the object is
+// actually collected, so it can be resurrected. In the last case, it is not
+// possible to request a second pass callback.
+enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
+
+template <class T>
+void* WeakCallbackInfo<T>::GetInternalField(int index) const {
+#ifdef V8_ENABLE_CHECKS
+ if (index < 0 || index >= kEmbedderFieldsInWeakCallback) {
+ api_internal::InternalFieldOutOfBounds(index);
+ }
+#endif
+ return embedder_fields_[index];
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_WEAK_CALLBACK_INFO_H_
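
A minimal sketch of the two-pass weak-callback protocol described above, assuming a heap-allocated `Wrapper` that owns a Global to the JS object it wraps; all names are illustrative.

    struct Wrapper {
      v8::Global<v8::Object> handle;
      // ... native state owned by the wrapper ...
    };

    void SecondPass(const v8::WeakCallbackInfo<Wrapper>& data) {
      delete data.GetParameter();  // additional cleanup is deferred to the second pass
    }

    void FirstPass(const v8::WeakCallbackInfo<Wrapper>& data) {
      data.GetParameter()->handle.Reset();     // mandatory: reset the triggering Global
      data.SetSecondPassCallback(SecondPass);  // schedule any further work
    }

    void MakeWeak(Wrapper* wrapper) {
      wrapper->handle.SetWeak(wrapper, FirstPass, v8::WeakCallbackType::kParameter);
    }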
diff --git a/chromium/v8/include/v8.h b/chromium/v8/include/v8.h
index 78c454e334f..dd91f880b72 100644
--- a/chromium/v8/include/v8.h
+++ b/chromium/v8/include/v8.h
@@ -7,7 +7,7 @@
* V8 is Google's open source JavaScript engine.
*
* This set of documents provides reference material generated from the
- * V8 header file, include/v8.h.
+ * V8 header files in the include/ subdirectory.
*
* For other documentation see https://v8.dev/.
*/
@@ -17,19 +17,51 @@
#include <stddef.h>
#include <stdint.h>
-#include <stdio.h>
-#include <atomic>
#include <memory>
-#include <string>
-#include <type_traits>
-#include <utility>
#include <vector>
#include "cppgc/common.h"
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8-version.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-container.h" // NOLINT(build/include_directory)
+#include "v8-context.h" // NOLINT(build/include_directory)
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-date.h" // NOLINT(build/include_directory)
+#include "v8-debug.h" // NOLINT(build/include_directory)
+#include "v8-exception.h" // NOLINT(build/include_directory)
+#include "v8-extension.h" // NOLINT(build/include_directory)
+#include "v8-external.h" // NOLINT(build/include_directory)
+#include "v8-function.h" // NOLINT(build/include_directory)
+#include "v8-initialization.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-json.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-locker.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-microtask-queue.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive-object.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-promise.h" // NOLINT(build/include_directory)
+#include "v8-proxy.h" // NOLINT(build/include_directory)
+#include "v8-regexp.h" // NOLINT(build/include_directory)
+#include "v8-script.h" // NOLINT(build/include_directory)
+#include "v8-snapshot.h" // NOLINT(build/include_directory)
+#include "v8-statistics.h" // NOLINT(build/include_directory)
+#include "v8-template.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8-typed-array.h" // NOLINT(build/include_directory)
+#include "v8-unwinder.h" // NOLINT(build/include_directory)
+#include "v8-value-serializer.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8-version.h" // NOLINT(build/include_directory)
+#include "v8-wasm.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@@ -39,12292 +71,7 @@
*/
namespace v8 {
-class AccessorSignature;
-class Array;
-class ArrayBuffer;
-class BigInt;
-class BigIntObject;
-class Boolean;
-class BooleanObject;
-class CFunction;
-class CallHandlerHelper;
-class Context;
-class CppHeap;
-class CTypeInfo;
-class Data;
-class Date;
-class EscapableHandleScope;
-class External;
-class Function;
-class FunctionTemplate;
-class HeapProfiler;
-class ImplementationUtilities;
-class Int32;
-class Integer;
-class Isolate;
-class Isolate;
-class MicrotaskQueue;
-class Name;
-class Number;
-class NumberObject;
-class Object;
-class ObjectOperationDescriptor;
-class ObjectTemplate;
class Platform;
-class Primitive;
-class PrimitiveArray;
-class Private;
-class Promise;
-class PropertyDescriptor;
-class Proxy;
-class RawOperationDescriptor;
-class Script;
-class SharedArrayBuffer;
-class Signature;
-class StackFrame;
-class StackTrace;
-class StartupData;
-class String;
-class StringObject;
-class Symbol;
-class SymbolObject;
-class TracedReferenceBase;
-class Uint32;
-class Utils;
-class Value;
-class WasmMemoryObject;
-class WasmModuleObject;
-template <class K, class V, class T>
-class GlobalValueMap;
-template <class K, class V, class T>
-class PersistentValueMapBase;
-template<class T> class NonCopyablePersistentTraits;
-template <class T, class M = NonCopyablePersistentTraits<T>>
-class Persistent;
-template <class T>
-class BasicTracedReference;
-template <class T>
-class Eternal;
-template <class T>
-class Global;
-template <class T>
-class Local;
-template <class T>
-class Maybe;
-template <class T>
-class MaybeLocal;
-template <class T>
-class TracedGlobal;
-template <class T>
-class TracedReference;
-template<class K, class V, class T> class PersistentValueMap;
-template<class T, class P> class WeakCallbackObject;
-template <class T>
-class PersistentBase;
-template <class V, class T>
-class PersistentValueVector;
-template<typename T> class FunctionCallbackInfo;
-template<typename T> class PropertyCallbackInfo;
-template<typename T> class ReturnValue;
-
-namespace internal {
-class BackgroundDeserializeTask;
-class BasicTracedReferenceExtractor;
-class ExternalString;
-class FunctionCallbackArguments;
-class GlobalHandles;
-class Heap;
-class HeapObject;
-class Isolate;
-class LocalEmbedderHeapTracer;
-class MicrotaskQueue;
-class PropertyCallbackArguments;
-class ReadOnlyHeap;
-class ScopedExternalStringLock;
-class ThreadLocalTop;
-struct ScriptStreamingData;
-enum class ArgumentsType;
-template <ArgumentsType>
-class Arguments;
-template <typename T>
-class CustomArguments;
-
-namespace wasm {
-class NativeModule;
-class StreamingDecoder;
-} // namespace wasm
-
-} // namespace internal
-
-namespace metrics {
-class Recorder;
-} // namespace metrics
-
-namespace debug {
-class ConsoleCallArguments;
-} // namespace debug
-
-// --- Handles ---
-
-/**
- * An object reference managed by the v8 garbage collector.
- *
- * All objects returned from v8 have to be tracked by the garbage
- * collector so that it knows that the objects are still alive. Also,
- * because the garbage collector may move objects, it is unsafe to
- * point directly to an object. Instead, all objects are stored in
- * handles which are known by the garbage collector and updated
- * whenever an object moves. Handles should always be passed by value
- * (except in cases like out-parameters) and they should never be
- * allocated on the heap.
- *
- * There are two types of handles: local and persistent handles.
- *
- * Local handles are light-weight and transient and typically used in
- * local operations. They are managed by HandleScopes. That means that a
- * HandleScope must exist on the stack when they are created and that they are
- * only valid inside of the HandleScope active during their creation.
- * For passing a local handle to an outer HandleScope, an EscapableHandleScope
- * and its Escape() method must be used.
- *
- * Persistent handles can be used when storing objects across several
- * independent operations and have to be explicitly deallocated when they're no
- * longer used.
- *
- * It is safe to extract the object stored in the handle by
- * dereferencing the handle (for instance, to extract the Object* from
- * a Local<Object>); the value will still be governed by a handle
- * behind the scenes and the same rules apply to these values as to
- * their handles.
- */
-template <class T>
-class Local {
- public:
- V8_INLINE Local() : val_(nullptr) {}
- template <class S>
- V8_INLINE Local(Local<S> that)
- : val_(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Local<String> to a
- * Local<Number>.
- */
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Returns true if the handle is empty.
- */
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * Sets the handle to be empty. IsEmpty() will then return true.
- */
- V8_INLINE void Clear() { val_ = nullptr; }
-
- V8_INLINE T* operator->() const { return val_; }
-
- V8_INLINE T* operator*() const { return val_; }
-
- /**
- * Checks whether two handles are the same.
- * Returns true if both are empty, or if the objects to which they refer
- * are identical.
- *
- * If both handles refer to JS objects, this is the same as strict equality.
- * For primitives, such as numbers or strings, a `false` return value does not
- * indicate that the values aren't equal in the JavaScript sense.
- * Use `Value::StrictEquals()` to check primitives for equality.
- */
- template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S> V8_INLINE bool operator==(
- const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- /**
- * Checks whether two handles are different.
- * Returns true if only one of the handles is empty, or if
- * the objects to which they refer are different.
- *
- * If both handles refer to JS objects, this is the same as strict
- * non-equality. For primitives, such as numbers or strings, a `true` return
- * value does not indicate that the values aren't equal in the JavaScript
- * sense. Use `Value::StrictEquals()` to check primitives for equality.
- */
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
- }
-
- template <class S> V8_INLINE bool operator!=(
- const Persistent<S>& that) const {
- return !operator==(that);
- }
-
- /**
- * Cast a handle to a subclass, e.g. Local<Value> to Local<Object>.
- * This is only valid if the handle actually refers to a value of the
- * target type.
- */
- template <class S> V8_INLINE static Local<T> Cast(Local<S> that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Local<T>();
-#endif
- return Local<T>(T::Cast(*that));
- }
-
- /**
- * Calling this is equivalent to Local<S>::Cast().
- * In particular, this is only valid if the handle actually refers to a value
- * of the target type.
- */
- template <class S>
- V8_INLINE Local<S> As() const {
- return Local<S>::Cast(*this);
- }
-
- /**
- * Create a local handle for the content of another handle.
- * The referee is kept alive by the local handle even when
- * the original handle is destroyed/disposed.
- */
- V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that);
- V8_INLINE static Local<T> New(Isolate* isolate,
- const PersistentBase<T>& that);
- V8_INLINE static Local<T> New(Isolate* isolate,
- const BasicTracedReference<T>& that);
-
- private:
- friend class TracedReferenceBase;
- friend class Utils;
- template<class F> friend class Eternal;
- template<class F> friend class PersistentBase;
- template<class F, class M> friend class Persistent;
- template<class F> friend class Local;
- template <class F>
- friend class MaybeLocal;
- template<class F> friend class FunctionCallbackInfo;
- template<class F> friend class PropertyCallbackInfo;
- friend class String;
- friend class Object;
- friend class Context;
- friend class Isolate;
- friend class Private;
- template<class F> friend class internal::CustomArguments;
- friend Local<Primitive> Undefined(Isolate* isolate);
- friend Local<Primitive> Null(Isolate* isolate);
- friend Local<Boolean> True(Isolate* isolate);
- friend Local<Boolean> False(Isolate* isolate);
- friend class HandleScope;
- friend class EscapableHandleScope;
- template <class F1, class F2, class F3>
- friend class PersistentValueMapBase;
- template<class F1, class F2> friend class PersistentValueVector;
- template <class F>
- friend class ReturnValue;
- template <class F>
- friend class Traced;
- template <class F>
- friend class TracedGlobal;
- template <class F>
- friend class BasicTracedReference;
- template <class F>
- friend class TracedReference;
-
- explicit V8_INLINE Local(T* that) : val_(that) {}
- V8_INLINE static Local<T> New(Isolate* isolate, T* that);
- T* val_;
-};
-
-
-#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
-// Handle is an alias for Local for historical reasons.
-template <class T>
-using Handle = Local<T>;
-#endif
-
-
-/**
- * A MaybeLocal<> is a wrapper around Local<> that enforces a check whether
- * the Local<> is empty before it can be used.
- *
- * If an API method returns a MaybeLocal<>, the API method can potentially fail
- * either because an exception is thrown, or because an exception is pending,
- * e.g. because a previous API call threw an exception that hasn't been caught
- * yet, or because a TerminateExecution exception was thrown. In that case, an
- * empty MaybeLocal is returned.
- */
-template <class T>
-class MaybeLocal {
- public:
- V8_INLINE MaybeLocal() : val_(nullptr) {}
- template <class S>
- V8_INLINE MaybeLocal(Local<S> that)
- : val_(reinterpret_cast<T*>(*that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
- * |false| is returned and |out| is left untouched.
- */
- template <class S>
- V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
- out->val_ = IsEmpty() ? nullptr : this->val_;
- return !IsEmpty();
- }
-
- /**
- * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
- * V8 will crash the process.
- */
- V8_INLINE Local<T> ToLocalChecked();
-
- /**
- * Converts this MaybeLocal<> to a Local<>, using a default value if this
- * MaybeLocal<> is empty.
- */
- template <class S>
- V8_INLINE Local<S> FromMaybe(Local<S> default_value) const {
- return IsEmpty() ? default_value : Local<S>(val_);
- }
-
- private:
- T* val_;
-};
-
-/**
- * Eternal handles are set-once handles that live for the lifetime of the
- * isolate.
- */
-template <class T> class Eternal {
- public:
- V8_INLINE Eternal() : val_(nullptr) {}
- template <class S>
- V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : val_(nullptr) {
- Set(isolate, handle);
- }
- // Can only be safely called if already set.
- V8_INLINE Local<T> Get(Isolate* isolate) const;
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
- template<class S> V8_INLINE void Set(Isolate* isolate, Local<S> handle);
-
- private:
- T* val_;
-};
-
-
-static const int kInternalFieldsInWeakCallback = 2;
-static const int kEmbedderFieldsInWeakCallback = 2;
-
-template <typename T>
-class WeakCallbackInfo {
- public:
- using Callback = void (*)(const WeakCallbackInfo<T>& data);
-
- WeakCallbackInfo(Isolate* isolate, T* parameter,
- void* embedder_fields[kEmbedderFieldsInWeakCallback],
- Callback* callback)
- : isolate_(isolate), parameter_(parameter), callback_(callback) {
- for (int i = 0; i < kEmbedderFieldsInWeakCallback; ++i) {
- embedder_fields_[i] = embedder_fields[i];
- }
- }
-
- V8_INLINE Isolate* GetIsolate() const { return isolate_; }
- V8_INLINE T* GetParameter() const { return parameter_; }
- V8_INLINE void* GetInternalField(int index) const;
-
- // When first called, the embedder MUST Reset() the Global which triggered the
- // callback. The Global itself is unusable for anything else. No v8 other api
- // calls may be called in the first callback. Should additional work be
- // required, the embedder must set a second pass callback, which will be
- // called after all the initial callbacks are processed.
- // Calling SetSecondPassCallback on the second pass will immediately crash.
- void SetSecondPassCallback(Callback callback) const { *callback_ = callback; }
-
- private:
- Isolate* isolate_;
- T* parameter_;
- Callback* callback_;
- void* embedder_fields_[kEmbedderFieldsInWeakCallback];
-};
-
-
-// kParameter will pass a void* parameter back to the callback, kInternalFields
-// will pass the first two internal fields back to the callback, kFinalizer
-// will pass a void* parameter back, but is invoked before the object is
-// actually collected, so it can be resurrected. In the last case, it is not
-// possible to request a second pass callback.
-enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
-
-/**
- * An object reference that is independent of any handle scope. Where
- * a Local handle only lives as long as the HandleScope in which it was
- * allocated, a PersistentBase handle remains valid until it is explicitly
- * disposed using Reset().
- *
- * A persistent handle contains a reference to a storage cell within
- * the V8 engine which holds an object value and which is updated by
- * the garbage collector whenever the object is moved. A new storage
- * cell can be created using the constructor or PersistentBase::Reset and
- * existing handles can be disposed using PersistentBase::Reset.
- *
- */
-template <class T> class PersistentBase {
- public:
- /**
- * If non-empty, destroy the underlying storage cell
- * IsEmpty() will return true after this call.
- */
- V8_INLINE void Reset();
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
-
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
- V8_INLINE void Empty() { val_ = 0; }
-
- V8_INLINE Local<T> Get(Isolate* isolate) const {
- return Local<T>::New(isolate, *this);
- }
-
- template <class S>
- V8_INLINE bool operator==(const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
- return !operator==(that);
- }
-
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
- }
-
- /**
- * Install a finalization callback on this object.
- * NOTE: There is no guarantee as to *when* or even *if* the callback is
- * invoked. The invocation is performed solely on a best effort basis.
- * As always, GC-based finalization should *not* be relied upon for any
- * critical form of resource management!
- *
- * The callback is supposed to reset the handle. No further V8 API may be
- * called in this callback. In case additional work involving V8 needs to be
- * done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- template <typename P>
- V8_INLINE void SetWeak(P* parameter,
- typename WeakCallbackInfo<P>::Callback callback,
- WeakCallbackType type);
-
- /**
- * Turns this handle into a weak phantom handle without finalization callback.
- * The handle will be reset automatically when the garbage collector detects
- * that the object is no longer reachable.
- * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall
- * returns how many phantom handles were reset by the garbage collector.
- */
- V8_INLINE void SetWeak();
-
- template<typename P>
- V8_INLINE P* ClearWeak();
-
- // TODO(dcarney): remove this.
- V8_INLINE void ClearWeak() { ClearWeak<void>(); }
-
- /**
- * Annotates the strong handle with the given label, which is then used by the
- * heap snapshot generator as a name of the edge from the root to the handle.
- * The function does not take ownership of the label and assumes that the
- * label is valid as long as the handle is valid.
- */
- V8_INLINE void AnnotateStrongRetainer(const char* label);
-
- /** Returns true if the handle's reference is weak. */
- V8_INLINE bool IsWeak() const;
-
- /**
- * Assigns a wrapper class ID to the handle.
- */
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
-
- /**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
- */
- V8_INLINE uint16_t WrapperClassId() const;
-
- PersistentBase(const PersistentBase& other) = delete;
- void operator=(const PersistentBase&) = delete;
-
- private:
- friend class Isolate;
- friend class Utils;
- template<class F> friend class Local;
- template<class F1, class F2> friend class Persistent;
- template <class F>
- friend class Global;
- template<class F> friend class PersistentBase;
- template<class F> friend class ReturnValue;
- template <class F1, class F2, class F3>
- friend class PersistentValueMapBase;
- template<class F1, class F2> friend class PersistentValueVector;
- friend class Object;
-
- explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
- V8_INLINE static T* New(Isolate* isolate, T* that);
-
- T* val_;
-};
-
-
-/**
- * Default traits for Persistent. This class does not allow
- * use of the copy constructor or assignment operator.
- * At present kResetInDestructor is not set, but that will change in a future
- * version.
- */
-template<class T>
-class NonCopyablePersistentTraits {
- public:
- using NonCopyablePersistent = Persistent<T, NonCopyablePersistentTraits<T>>;
- static const bool kResetInDestructor = false;
- template<class S, class M>
- V8_INLINE static void Copy(const Persistent<S, M>& source,
- NonCopyablePersistent* dest) {
- static_assert(sizeof(S) < 0,
- "NonCopyablePersistentTraits::Copy is not instantiable");
- }
-};
-
-
-/**
- * Helper class traits to allow copying and assignment of Persistent.
- * This will clone the contents of storage cell, but not any of the flags, etc.
- */
-template<class T>
-struct CopyablePersistentTraits {
- using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
- static const bool kResetInDestructor = true;
- template<class S, class M>
- static V8_INLINE void Copy(const Persistent<S, M>& source,
- CopyablePersistent* dest) {
- // do nothing, just allow copy
- }
-};
-
-
-/**
- * A PersistentBase which allows copy and assignment.
- *
- * Copy, assignment and destructor behavior is controlled by the traits
- * class M.
- *
- * Note: Persistent class hierarchy is subject to future changes.
- */
-template <class T, class M> class Persistent : public PersistentBase<T> {
- public:
- /**
- * A Persistent with no storage cell.
- */
- V8_INLINE Persistent() : PersistentBase<T>(nullptr) {}
- /**
- * Construct a Persistent from a Local.
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Persistent(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- /**
- * Construct a Persistent from a Persistent.
- * When the Persistent is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S, class M2>
- V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- /**
- * The copy constructors and assignment operator create a Persistent
- * exactly as the Persistent constructor, but the Copy function from the
- * traits class is called, allowing the setting of flags based on the
- * copied Persistent.
- */
- V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(nullptr) {
- Copy(that);
- }
- template <class S, class M2>
- V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
- Copy(that);
- }
- V8_INLINE Persistent& operator=(const Persistent& that) {
- Copy(that);
- return *this;
- }
- template <class S, class M2>
- V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) {
- Copy(that);
- return *this;
- }
- /**
- * The destructor will dispose the Persistent based on the
- * kResetInDestructor flags in the traits class. Since not calling dispose
- * can result in a memory leak, it is recommended to always set this flag.
- */
- V8_INLINE ~Persistent() {
- if (M::kResetInDestructor) this->Reset();
- }
-
- // TODO(dcarney): this is pretty useless, fix or remove
- template <class S>
- V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (!that.IsEmpty()) T::Cast(*that);
-#endif
- return reinterpret_cast<Persistent<T>&>(const_cast<Persistent<S>&>(that));
- }
-
- // TODO(dcarney): this is pretty useless, fix or remove
- template <class S>
- V8_INLINE Persistent<S>& As() const {
- return Persistent<S>::Cast(*this);
- }
-
- private:
- friend class Isolate;
- friend class Utils;
- template<class F> friend class Local;
- template<class F1, class F2> friend class Persistent;
- template<class F> friend class ReturnValue;
-
- explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
- V8_INLINE T* operator*() const { return this->val_; }
- template<class S, class M2>
- V8_INLINE void Copy(const Persistent<S, M2>& that);
-};
-
-
-/**
- * A PersistentBase which has move semantics.
- *
- * Note: Persistent class hierarchy is subject to future changes.
- */
-template <class T>
-class Global : public PersistentBase<T> {
- public:
- /**
- * A Global with no storage cell.
- */
- V8_INLINE Global() : PersistentBase<T>(nullptr) {}
-
- /**
- * Construct a Global from a Local.
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Global(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Construct a Global from a PersistentBase.
- * When the Persistent is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor.
- */
- V8_INLINE Global(Global&& other);
-
- V8_INLINE ~Global() { this->Reset(); }
-
- /**
- * Move via assignment.
- */
- template <class S>
- V8_INLINE Global& operator=(Global<S>&& rhs);
-
- /**
- * Pass allows returning uniques from functions, etc.
- */
- Global Pass() { return static_cast<Global&&>(*this); }
-
- /*
- * For compatibility with Chromium's base::Bind (base::Passed).
- */
- using MoveOnlyTypeForCPP03 = void;
-
- Global(const Global&) = delete;
- void operator=(const Global&) = delete;
-
- private:
- template <class F>
- friend class ReturnValue;
- V8_INLINE T* operator*() const { return this->val_; }
-};
-
-
-// UniquePersistent is an alias for Global for historical reason.
-template <class T>
-using UniquePersistent = Global<T>;
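-
-/**
- * Usage sketch for Global (illustrative only; assumes an initialized
- * v8::Isolate* |isolate| and a live HandleScope when Get() is called).
- *
- * \code
- *   v8::Global<v8::Object> cached;
- *
- *   void Remember(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
- *     cached.Reset(isolate, obj);  // Creates a new storage cell.
- *   }
- *
- *   v8::Local<v8::Object> Recall(v8::Isolate* isolate) {
- *     return cached.Get(isolate);  // Materializes a Local from the cell.
- *   }
- * \endcode
- */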
-
-/**
- * Deprecated. Use |TracedReference<T>| instead.
- */
-template <typename T>
-struct TracedGlobalTrait {};
-
-class TracedReferenceBase {
- public:
- /**
- * Returns true if the reference is empty, i.e., has not been assigned an
- * object.
- */
- bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
- * true after this call.
- */
- V8_INLINE void Reset();
-
- /**
- * Construct a Local<Value> from this handle.
- */
- V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const;
-
- /**
- * Returns true if this TracedReference is empty, i.e., has not been
- * assigned an object. This version of IsEmpty is thread-safe.
- */
- bool IsEmptyThreadSafe() const {
- return this->GetSlotThreadSafe() == nullptr;
- }
-
- /**
- * Assigns a wrapper class ID to the handle.
- */
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
-
- /**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
- */
- V8_INLINE uint16_t WrapperClassId() const;
-
- protected:
- /**
- * Update this reference in a thread-safe way.
- */
- void SetSlotThreadSafe(void* new_val) {
- reinterpret_cast<std::atomic<void*>*>(&val_)->store(
- new_val, std::memory_order_relaxed);
- }
-
- /**
- * Get this reference in a thread-safe way.
- */
- const void* GetSlotThreadSafe() const {
- return reinterpret_cast<std::atomic<const void*> const*>(&val_)->load(
- std::memory_order_relaxed);
- }
-
- V8_EXPORT void CheckValue() const;
-
- // val_ points to a GlobalHandles node.
- internal::Address* val_ = nullptr;
-
- friend class internal::BasicTracedReferenceExtractor;
- template <typename F>
- friend class Local;
- template <typename U>
- friend bool operator==(const TracedReferenceBase&, const Local<U>&);
- friend bool operator==(const TracedReferenceBase&,
- const TracedReferenceBase&);
-};
-
-/**
- * A traced handle with copy and move semantics. The handle is to be used
- * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
- * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
- *
- * The exact semantics are:
- * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
- * - Non-tracing garbage collections refer to
- *   |v8::EmbedderRootsHandler::IsRoot()| to determine whether the handle
- *   should be treated as a root.
- *
- * Note that the base class cannot be instantiated itself. Choose from
- * - TracedGlobal
- * - TracedReference
- */
-template <typename T>
-class BasicTracedReference : public TracedReferenceBase {
- public:
- /**
- * Construct a Local<T> from this handle.
- */
- Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
-
- template <class S>
- V8_INLINE BasicTracedReference<S>& As() const {
- return reinterpret_cast<BasicTracedReference<S>&>(
- const_cast<BasicTracedReference<T>&>(*this));
- }
-
- T* operator->() const {
-#ifdef V8_ENABLE_CHECKS
- CheckValue();
-#endif // V8_ENABLE_CHECKS
- return reinterpret_cast<T*>(val_);
- }
- T* operator*() const {
-#ifdef V8_ENABLE_CHECKS
- CheckValue();
-#endif // V8_ENABLE_CHECKS
- return reinterpret_cast<T*>(val_);
- }
-
- private:
- enum DestructionMode { kWithDestructor, kWithoutDestructor };
-
- /**
- * An empty BasicTracedReference without storage cell.
- */
- BasicTracedReference() = default;
-
- V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot,
- DestructionMode destruction_mode);
-
- friend class EmbedderHeapTracer;
- template <typename F>
- friend class Local;
- friend class Object;
- template <typename F>
- friend class TracedGlobal;
- template <typename F>
- friend class TracedReference;
- template <typename F>
- friend class BasicTracedReference;
- template <typename F>
- friend class ReturnValue;
-};
-
-/**
- * A traced handle with destructor that clears the handle. For more details see
- * BasicTracedReference.
- */
-template <typename T>
-class TracedGlobal : public BasicTracedReference<T> {
- public:
- using BasicTracedReference<T>::Reset;
-
- /**
- * Destructor resetting the handle.
- */
- ~TracedGlobal() { this->Reset(); }
-
- /**
- * An empty TracedGlobal without storage cell.
- */
- TracedGlobal() : BasicTracedReference<T>() {}
-
- /**
- * Construct a TracedGlobal from a Local.
- *
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object.
- */
- template <class S>
- TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ = this->New(isolate, that.val_, &this->val_,
- BasicTracedReference<T>::kWithDestructor);
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(TracedGlobal&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(TracedGlobal<S>&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(const TracedGlobal& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs);
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
-
- /**
- * If non-empty, destroys the underlying storage cell and creates a new one
- * with the contents of |other| if |other| is non-empty.
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- template <class S>
- V8_INLINE TracedGlobal<S>& As() const {
- return reinterpret_cast<TracedGlobal<S>&>(
- const_cast<TracedGlobal<T>&>(*this));
- }
-
- /**
- * Adds a finalization callback to the handle. The type of this callback is
- * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
- * parameter and the first two internal fields of the object.
- *
- * The callback is then supposed to reset the handle in the callback. No
- * further V8 API may be called in this callback. In case additional work
- * involving V8 needs to be done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- V8_INLINE void SetFinalizationCallback(
- void* parameter, WeakCallbackInfo<void>::Callback callback);
-};
-
-/**
- * A traced handle without a destructor that clears the handle. The embedder needs
- * to ensure that the handle is not accessed once the V8 object has been
- * reclaimed. This can happen when the handle is not passed through the
- * EmbedderHeapTracer. For more details see BasicTracedReference.
- *
- * The reference assumes the embedder has precise knowledge about references at
- * all times. In case V8 needs to separately handle on-stack references, the
- * embedder is required to set the stack start through
- * |EmbedderHeapTracer::SetStackStart|.
- */
-template <typename T>
-class TracedReference : public BasicTracedReference<T> {
- public:
- using BasicTracedReference<T>::Reset;
-
- /**
- * An empty TracedReference without storage cell.
- */
- TracedReference() : BasicTracedReference<T>() {}
-
- /**
- * Construct a TracedReference from a Local.
- *
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object.
- */
- template <class S>
- TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ = this->New(isolate, that.val_, &this->val_,
- BasicTracedReference<T>::kWithoutDestructor);
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor initializing TracedReference from an
- * existing one.
- */
- V8_INLINE TracedReference(TracedReference&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Move constructor initializing TracedReference from an
- * existing one.
- */
- template <typename S>
- V8_INLINE TracedReference(TracedReference<S>&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Copy constructor initializing TracedReference from an
- * existing one.
- */
- V8_INLINE TracedReference(const TracedReference& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Copy constructor initializing TracedReference from an
- * existing one.
- */
- template <typename S>
- V8_INLINE TracedReference(const TracedReference<S>& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Move assignment operator initializing TracedReference from an existing one.
- */
- V8_INLINE TracedReference& operator=(TracedReference&& rhs);
-
- /**
- * Move assignment operator initializing TracedReference from an existing one.
- */
- template <class S>
- V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs);
-
- /**
- * Copy assignment operator initializing TracedReference from an existing one.
- */
- V8_INLINE TracedReference& operator=(const TracedReference& rhs);
-
- /**
- * Copy assignment operator initializing TracedReference from an existing one.
- */
- template <class S>
- V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
-
- /**
- * If non-empty, destroys the underlying storage cell and creates a new one
- * with the contents of |other| if |other| is non-empty.
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- template <class S>
- V8_INLINE TracedReference<S>& As() const {
- return reinterpret_cast<TracedReference<S>&>(
- const_cast<TracedReference<T>&>(*this));
- }
-};
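-
-/**
- * Usage sketch (illustrative only): a TracedReference member kept alive by
- * tracing. Assumes an initialized v8::Isolate* |isolate|, a live HandleScope,
- * and that the owning C++ object is reported to V8 via EmbedderHeapTracer or
- * cppgc so the reference is actually traced.
- *
- * \code
- *   struct Wrapper {
- *     v8::TracedReference<v8::Object> wrapped;
- *   };
- *
- *   void Attach(v8::Isolate* isolate, Wrapper* wrapper,
- *               v8::Local<v8::Object> obj) {
- *     wrapper->wrapped.Reset(isolate, obj);
- *   }
- *
- *   v8::Local<v8::Object> Unwrap(v8::Isolate* isolate, Wrapper* wrapper) {
- *     return wrapper->wrapped.Get(isolate);
- *   }
- * \endcode
- */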
-
-/**
- * A stack-allocated class that governs a number of local handles.
- * After a handle scope has been created, all local handles will be
- * allocated within that handle scope until either the handle scope is
- * deleted or another handle scope is created. If there is already a
- * handle scope and a new one is created, all allocations will take
- * place in the new handle scope until it is deleted. After that,
- * new handles will again be allocated in the original handle scope.
- *
- * After the handle scope of a local handle has been deleted the
- * garbage collector will no longer track the object stored in the
- * handle and may deallocate it. The behavior of accessing a handle
- * for which the handle scope has been deleted is undefined.
- */
-class V8_EXPORT V8_NODISCARD HandleScope {
- public:
- explicit HandleScope(Isolate* isolate);
-
- ~HandleScope();
-
- /**
- * Counts the number of allocated handles.
- */
- static int NumberOfHandles(Isolate* isolate);
-
- V8_INLINE Isolate* GetIsolate() const {
- return reinterpret_cast<Isolate*>(isolate_);
- }
-
- HandleScope(const HandleScope&) = delete;
- void operator=(const HandleScope&) = delete;
-
- protected:
- V8_INLINE HandleScope() = default;
-
- void Initialize(Isolate* isolate);
-
- static internal::Address* CreateHandle(internal::Isolate* isolate,
- internal::Address value);
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Isolate* isolate_;
- internal::Address* prev_next_;
- internal::Address* prev_limit_;
-
- // Local::New uses CreateHandle with an Isolate* parameter.
- template<class F> friend class Local;
-
- // Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
- // a HeapObject in their shortcuts.
- friend class Object;
- friend class Context;
-};
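-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate|): all Locals created inside the scope become invalid when the
- * scope is destroyed.
- *
- * \code
- *   void WithHandles(v8::Isolate* isolate) {
- *     v8::HandleScope handle_scope(isolate);
- *     v8::Local<v8::String> str =
- *         v8::String::NewFromUtf8Literal(isolate, "hello");
- *     // |str| must not be used after |handle_scope| goes out of scope.
- *   }
- * \endcode
- */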
-
-/**
- * A HandleScope which first allocates a handle in the current scope
- * which will be later filled with the escape value.
- */
-class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
- public:
- explicit EscapableHandleScope(Isolate* isolate);
- V8_INLINE ~EscapableHandleScope() = default;
-
- /**
- * Pushes the value into the previous scope and returns a handle to it.
- * Cannot be called twice.
- */
- template <class T>
- V8_INLINE Local<T> Escape(Local<T> value) {
- internal::Address* slot =
- Escape(reinterpret_cast<internal::Address*>(*value));
- return Local<T>(reinterpret_cast<T*>(slot));
- }
-
- template <class T>
- V8_INLINE MaybeLocal<T> EscapeMaybe(MaybeLocal<T> value) {
- return Escape(value.FromMaybe(Local<T>()));
- }
-
- EscapableHandleScope(const EscapableHandleScope&) = delete;
- void operator=(const EscapableHandleScope&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Address* Escape(internal::Address* escape_value);
- internal::Address* escape_slot_;
-};
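-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate| and an outer HandleScope in the caller): Escape() promotes a
- * single handle into the caller's scope.
- *
- * \code
- *   v8::Local<v8::String> MakeGreeting(v8::Isolate* isolate) {
- *     v8::EscapableHandleScope scope(isolate);
- *     v8::Local<v8::String> greeting =
- *         v8::String::NewFromUtf8Literal(isolate, "hi");
- *     return scope.Escape(greeting);  // Valid in the caller's scope.
- *   }
- * \endcode
- */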
-
-/**
- * A SealHandleScope acts like a handle scope in which no handle allocations
- * are allowed. It can be useful for debugging handle leaks.
- * Handles can be allocated within inner normal HandleScopes.
- */
-class V8_EXPORT V8_NODISCARD SealHandleScope {
- public:
- explicit SealHandleScope(Isolate* isolate);
- ~SealHandleScope();
-
- SealHandleScope(const SealHandleScope&) = delete;
- void operator=(const SealHandleScope&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Isolate* const isolate_;
- internal::Address* prev_limit_;
- int prev_sealed_level_;
-};
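-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate|): handle allocation is disallowed while the seal is active unless
- * a nested HandleScope is opened.
- *
- * \code
- *   v8::SealHandleScope seal(isolate);
- *   {
- *     v8::HandleScope nested(isolate);
- *     v8::Local<v8::Number> n = v8::Number::New(isolate, 1);  // OK here.
- *   }
- * \endcode
- */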
-
-// --- Special objects ---
-
-/**
- * The superclass of objects that can reside on V8's heap.
- */
-class V8_EXPORT Data {
- public:
- /**
- * Returns true if this data is a |v8::Value|.
- */
- bool IsValue() const;
-
- /**
- * Returns true if this data is a |v8::Module|.
- */
- bool IsModule() const;
-
- /**
- * Returns true if this data is a |v8::Private|.
- */
- bool IsPrivate() const;
-
- /**
- * Returns true if this data is a |v8::ObjectTemplate|.
- */
- bool IsObjectTemplate() const;
-
- /**
- * Returns true if this data is a |v8::FunctionTemplate|.
- */
- bool IsFunctionTemplate() const;
-
- /**
- * Returns true if this data is a |v8::Context|.
- */
- bool IsContext() const;
-
- private:
- Data();
-};
-
-/**
- * A container type that holds relevant metadata for module loading.
- *
- * This is passed back to the embedder as part of
- * HostImportModuleDynamicallyCallback for module loading.
- */
-class V8_EXPORT ScriptOrModule {
- public:
- /**
- * The name that was passed by the embedder as ResourceName to the
- * ScriptOrigin. This can be either a v8::String or v8::Undefined.
- */
- Local<Value> GetResourceName();
-
- /**
- * The options that were passed by the embedder as HostDefinedOptions to
- * the ScriptOrigin.
- */
- Local<PrimitiveArray> GetHostDefinedOptions();
-};
-
-/**
- * An array to hold Primitive values. This is used by the embedder to
- * pass host defined options to the ScriptOptions during compilation.
- *
- * This is passed back to the embedder as part of
- * HostImportModuleDynamicallyCallback for module loading.
- *
- */
-class V8_EXPORT PrimitiveArray {
- public:
- static Local<PrimitiveArray> New(Isolate* isolate, int length);
- int Length() const;
- void Set(Isolate* isolate, int index, Local<Primitive> item);
- Local<Primitive> Get(Isolate* isolate, int index);
-};
-
-/**
- * The optional attributes of ScriptOrigin.
- */
-class ScriptOriginOptions {
- public:
- V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
- bool is_opaque = false, bool is_wasm = false,
- bool is_module = false)
- : flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
- (is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
- (is_module ? kIsModule : 0)) {}
- V8_INLINE ScriptOriginOptions(int flags)
- : flags_(flags &
- (kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
-
- bool IsSharedCrossOrigin() const {
- return (flags_ & kIsSharedCrossOrigin) != 0;
- }
- bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
- bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
- bool IsModule() const { return (flags_ & kIsModule) != 0; }
-
- int Flags() const { return flags_; }
-
- private:
- enum {
- kIsSharedCrossOrigin = 1,
- kIsOpaque = 1 << 1,
- kIsWasm = 1 << 2,
- kIsModule = 1 << 3
- };
- const int flags_;
-};
-
-/**
- * The origin, within a file, of a script.
- */
-class ScriptOrigin {
- public:
- V8_DEPRECATED("Use constructor with primitive C++ types")
- V8_INLINE explicit ScriptOrigin(
- Local<Value> resource_name, Local<Integer> resource_line_offset,
- Local<Integer> resource_column_offset,
- Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
- Local<Integer> script_id = Local<Integer>(),
- Local<Value> source_map_url = Local<Value>(),
- Local<Boolean> resource_is_opaque = Local<Boolean>(),
- Local<Boolean> is_wasm = Local<Boolean>(),
- Local<Boolean> is_module = Local<Boolean>(),
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
- V8_DEPRECATED("Use constructor that takes an isolate")
- V8_INLINE explicit ScriptOrigin(
- Local<Value> resource_name, int resource_line_offset = 0,
- int resource_column_offset = 0,
- bool resource_is_shared_cross_origin = false, int script_id = -1,
- Local<Value> source_map_url = Local<Value>(),
- bool resource_is_opaque = false, bool is_wasm = false,
- bool is_module = false,
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
- V8_INLINE explicit ScriptOrigin(
- Isolate* isolate, Local<Value> resource_name,
- int resource_line_offset = 0, int resource_column_offset = 0,
- bool resource_is_shared_cross_origin = false, int script_id = -1,
- Local<Value> source_map_url = Local<Value>(),
- bool resource_is_opaque = false, bool is_wasm = false,
- bool is_module = false,
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
-
- V8_INLINE Local<Value> ResourceName() const;
- V8_DEPRECATED("Use getter with primitvie C++ types.")
- V8_INLINE Local<Integer> ResourceLineOffset() const;
- V8_DEPRECATED("Use getter with primitvie C++ types.")
- V8_INLINE Local<Integer> ResourceColumnOffset() const;
- V8_DEPRECATED("Use getter with primitvie C++ types.")
- V8_INLINE Local<Integer> ScriptID() const;
- V8_INLINE int LineOffset() const;
- V8_INLINE int ColumnOffset() const;
- V8_INLINE int ScriptId() const;
- V8_INLINE Local<Value> SourceMapUrl() const;
- V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const;
- V8_INLINE ScriptOriginOptions Options() const { return options_; }
-
- private:
- Isolate* isolate_;
- Local<Value> resource_name_;
- int resource_line_offset_;
- int resource_column_offset_;
- ScriptOriginOptions options_;
- int script_id_;
- Local<Value> source_map_url_;
- Local<PrimitiveArray> host_defined_options_;
-};
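-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate| and a live HandleScope): constructs an origin with the
- * non-deprecated constructor, setting is_module for an ES module source.
- *
- * \code
- *   v8::ScriptOrigin origin(
- *       isolate, v8::String::NewFromUtf8Literal(isolate, "module.mjs"),
- *       0,                        // resource_line_offset
- *       0,                        // resource_column_offset
- *       false,                    // resource_is_shared_cross_origin
- *       -1,                       // script_id
- *       v8::Local<v8::Value>(),   // source_map_url
- *       false,                    // resource_is_opaque
- *       false,                    // is_wasm
- *       true);                    // is_module
- * \endcode
- */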
-
-/**
- * A compiled JavaScript script, not yet tied to a Context.
- */
-class V8_EXPORT UnboundScript {
- public:
- /**
- * Binds the script to the currently entered context.
- */
- Local<Script> BindToCurrentContext();
-
- int GetId() const;
- Local<Value> GetScriptName();
-
- /**
- * Data read from magic sourceURL comments.
- */
- Local<Value> GetSourceURL();
- /**
- * Data read from magic sourceMappingURL comments.
- */
- Local<Value> GetSourceMappingURL();
-
- /**
- * Returns the zero-based line number of the code_pos location in the script.
- * -1 will be returned if no information is available.
- */
- int GetLineNumber(int code_pos);
-
- static const int kNoScriptId = 0;
-};
-
-/**
- * A compiled JavaScript module, not yet tied to a Context.
- */
-class V8_EXPORT UnboundModuleScript : public Data {
- // Only used as a container for code caching.
-};
-
-/**
- * A location in JavaScript source.
- */
-class V8_EXPORT Location {
- public:
- int GetLineNumber() { return line_number_; }
- int GetColumnNumber() { return column_number_; }
-
- Location(int line_number, int column_number)
- : line_number_(line_number), column_number_(column_number) {}
-
- private:
- int line_number_;
- int column_number_;
-};
-
-/**
- * A fixed-sized array with elements of type Data.
- */
-class V8_EXPORT FixedArray : public Data {
- public:
- int Length() const;
- Local<Data> Get(Local<Context> context, int i) const;
-};
-
-class V8_EXPORT ModuleRequest : public Data {
- public:
- /**
- * Returns the module specifier for this ModuleRequest.
- */
- Local<String> GetSpecifier() const;
-
- /**
- * Returns the source code offset of this module request.
- * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
- */
- int GetSourceOffset() const;
-
- /**
- * Contains the import assertions for this request in the form:
- * [key1, value1, source_offset1, key2, value2, source_offset2, ...].
- * The keys and values are of type v8::String, and the source offsets are of
- * type Int32. Use Module::SourceOffsetToLocation to convert the source
- * offsets to Locations with line/column numbers.
- *
- * All assertions present in the module request will be supplied in this
- * list, regardless of whether they are supported by the host. Per
- * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions,
- * hosts are expected to ignore assertions that they do not support (as
- * opposed to, for example, triggering an error if an unsupported assertion is
- * present).
- */
- Local<FixedArray> GetImportAssertions() const;
-
- V8_INLINE static ModuleRequest* Cast(Data* data);
-
- private:
- static void CheckCast(Data* obj);
-};
-
-/**
- * A compiled JavaScript module.
- */
-class V8_EXPORT Module : public Data {
- public:
- /**
- * The different states a module can be in.
- *
- * This corresponds to the states used in ECMAScript except that "evaluated"
- * is split into kEvaluated and kErrored, indicating success and failure,
- * respectively.
- */
- enum Status {
- kUninstantiated,
- kInstantiating,
- kInstantiated,
- kEvaluating,
- kEvaluated,
- kErrored
- };
-
- /**
- * Returns the module's current status.
- */
- Status GetStatus() const;
-
- /**
- * For a module in kErrored status, this returns the corresponding exception.
- */
- Local<Value> GetException() const;
-
- /**
- * Returns the number of modules requested by this module.
- */
- V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().")
- int GetModuleRequestsLength() const;
-
- /**
- * Returns the ith module specifier in this module.
- * i must be < GetModuleRequestsLength() and >= 0.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
- Local<String> GetModuleRequest(int i) const;
-
- /**
- * Returns the source location (line number and column number) of the ith
- * module specifier's first occurrence in this module.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
- "Module::SourceOffsetToLocation().")
- Location GetModuleRequestLocation(int i) const;
-
- /**
- * Returns the ModuleRequests for this module.
- */
- Local<FixedArray> GetModuleRequests() const;
-
- /**
- * For the given source text offset in this module, returns the corresponding
- * Location with line and column numbers.
- */
- Location SourceOffsetToLocation(int offset) const;
-
- /**
- * Returns the identity hash for this object.
- */
- int GetIdentityHash() const;
-
- using ResolveCallback V8_DEPRECATED("Use ResolveModuleCallback") =
- MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
- Local<Module> referrer);
- using ResolveModuleCallback = MaybeLocal<Module> (*)(
- Local<Context> context, Local<String> specifier,
- Local<FixedArray> import_assertions, Local<Module> referrer);
-
- /**
- * Instantiates the module and its dependencies.
- *
- * Returns an empty Maybe<bool> if an exception occurred during
- * instantiation. (In the case where the callback throws an exception, that
- * exception is propagated.)
- */
- V8_DEPRECATED(
- "Use the version of InstantiateModule that takes a ResolveModuleCallback "
- "parameter")
- V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
- ResolveCallback callback);
- V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
- Local<Context> context, ResolveModuleCallback callback);
-
- /**
- * Evaluates the module and its dependencies.
- *
- * If status is kInstantiated, run the module's code and return a Promise
- * object. On success, set status to kEvaluated and resolve the Promise with
- * the completion value; on failure, set status to kErrored and reject the
- * Promise with the error.
- *
- * If IsGraphAsync() is false, the returned Promise is settled.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
-
- /**
- * Returns the namespace object of this module.
- *
- * The module's status must be at least kInstantiated.
- */
- Local<Value> GetModuleNamespace();
-
- /**
- * Returns the corresponding context-unbound module script.
- *
- * The module must be unevaluated, i.e. its status must not be kEvaluating,
- * kEvaluated or kErrored.
- */
- Local<UnboundModuleScript> GetUnboundModuleScript();
-
- /**
- * Returns the underlying script's id.
- *
- * The module must be a SourceTextModule and must not have a kErrored status.
- */
- int ScriptId() const;
-
- /**
- * Returns whether this module or any of its requested modules is async,
- * i.e. contains top-level await.
- *
- * The module's status must be at least kInstantiated.
- */
- bool IsGraphAsync() const;
-
- /**
- * Returns whether the module is a SourceTextModule.
- */
- bool IsSourceTextModule() const;
-
- /**
- * Returns whether the module is a SyntheticModule.
- */
- bool IsSyntheticModule() const;
-
- /**
- * Callback defined in the embedder. This is responsible for setting
- * the module's exported values with calls to SetSyntheticModuleExport().
- * The callback must return a resolved Promise to indicate success (where no
- * exception was thrown) and return an empty MaybeLocal to indicate failure
- * (where an exception was thrown).
- */
- using SyntheticModuleEvaluationSteps =
- MaybeLocal<Value> (*)(Local<Context> context, Local<Module> module);
-
- /**
- * Creates a new SyntheticModule with the specified export names, where
- * evaluation_steps will be executed upon module evaluation.
- * export_names must not contain duplicates.
- * module_name is used solely for logging/debugging and doesn't affect module
- * behavior.
- */
- static Local<Module> CreateSyntheticModule(
- Isolate* isolate, Local<String> module_name,
- const std::vector<Local<String>>& export_names,
- SyntheticModuleEvaluationSteps evaluation_steps);
-
- /**
- * Set this module's exported value for the name export_name to the specified
- * export_value. This method must be called only on Modules created via
- * CreateSyntheticModule. An error will be thrown if export_name is not one
- * of the export_names that were passed in that CreateSyntheticModule call.
- * Returns Just(true) on success, Nothing<bool>() if an error was thrown.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
- Isolate* isolate, Local<String> export_name, Local<Value> export_value);
-
- V8_INLINE static Module* Cast(Data* data);
-
- private:
- static void CheckCast(Data* obj);
-};
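-
-/**
- * Usage sketch (illustrative only): compiling, instantiating and evaluating a
- * module without imports. Assumes an initialized v8::Isolate* |isolate|, a
- * live HandleScope, an entered v8::Local<v8::Context> |context|, and a
- * v8::ScriptOrigin |origin| constructed with is_module == true. Error
- * handling is reduced to early returns.
- *
- * \code
- *   v8::MaybeLocal<v8::Module> NoImports(v8::Local<v8::Context> context,
- *                                        v8::Local<v8::String> specifier,
- *                                        v8::Local<v8::FixedArray> assertions,
- *                                        v8::Local<v8::Module> referrer) {
- *     return v8::MaybeLocal<v8::Module>();  // This sketch resolves nothing.
- *   }
- *
- *   void RunModule(v8::Isolate* isolate, v8::Local<v8::Context> context,
- *                  v8::Local<v8::String> code,
- *                  const v8::ScriptOrigin& origin) {
- *     v8::ScriptCompiler::Source source(code, origin);
- *     v8::Local<v8::Module> module;
- *     if (!v8::ScriptCompiler::CompileModule(isolate, &source)
- *              .ToLocal(&module)) return;
- *     if (!module->InstantiateModule(context, NoImports).FromMaybe(false))
- *       return;
- *     v8::Local<v8::Value> result;  // A Promise of the completion value.
- *     if (!module->Evaluate(context).ToLocal(&result)) return;
- *   }
- * \endcode
- */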
-
-/**
- * A compiled JavaScript script, tied to a Context which was active when the
- * script was compiled.
- */
-class V8_EXPORT Script {
- public:
- /**
- * A shorthand for ScriptCompiler::Compile().
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, Local<String> source,
- ScriptOrigin* origin = nullptr);
-
- /**
- * Runs the script returning the resulting value. It will be run in the
- * context in which it was created (ScriptCompiler::CompileBound or
- * UnboundScript::BindToCurrentContext()).
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
-
- /**
- * Returns the corresponding context-unbound script.
- */
- Local<UnboundScript> GetUnboundScript();
-};
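-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate|, a live HandleScope and an entered v8::Local<v8::Context>
- * |context|): compiles and runs a classic script.
- *
- * \code
- *   v8::Local<v8::String> code =
- *       v8::String::NewFromUtf8Literal(isolate, "1 + 2");
- *   v8::Local<v8::Script> script;
- *   v8::Local<v8::Value> result;
- *   if (v8::Script::Compile(context, code).ToLocal(&script) &&
- *       script->Run(context).ToLocal(&result)) {
- *     // |result| holds the number 3.
- *   }
- * \endcode
- */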
-
-enum class ScriptType { kClassic, kModule };
-
-/**
- * For compiling scripts.
- */
-class V8_EXPORT ScriptCompiler {
- public:
- class ConsumeCodeCacheTask;
-
- /**
- * Compilation data that the embedder can cache and pass back to speed up
- * future compilations. The data is produced if the CompilerOptions passed to
- * the compilation functions in ScriptCompiler contains produce_data_to_cache
- * = true. The data to cache can then can be retrieved from
- * UnboundScript.
- */
- struct V8_EXPORT CachedData {
- enum BufferPolicy {
- BufferNotOwned,
- BufferOwned
- };
-
- CachedData()
- : data(nullptr),
- length(0),
- rejected(false),
- buffer_policy(BufferNotOwned) {}
-
- // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
- // data and guarantees that it stays alive until the CachedData object is
- // destroyed. If the policy is BufferOwned, the given data will be deleted
- // (with delete[]) when the CachedData object is destroyed.
- CachedData(const uint8_t* data, int length,
- BufferPolicy buffer_policy = BufferNotOwned);
- ~CachedData();
- // TODO(marja): Async compilation; add constructors which take a callback
- // which will be called when V8 no longer needs the data.
- const uint8_t* data;
- int length;
- bool rejected;
- BufferPolicy buffer_policy;
-
- // Prevent copying.
- CachedData(const CachedData&) = delete;
- CachedData& operator=(const CachedData&) = delete;
- };
-
- /**
- * Source code which can then be compiled to an UnboundScript or Script.
- */
- class Source {
- public:
- // Source takes ownership of both CachedData and CodeCacheConsumeTask.
- V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
- CachedData* cached_data = nullptr,
- ConsumeCodeCacheTask* consume_cache_task = nullptr);
- // Source takes ownership of both CachedData and CodeCacheConsumeTask.
- V8_INLINE explicit Source(
- Local<String> source_string, CachedData* cached_data = nullptr,
- ConsumeCodeCacheTask* consume_cache_task = nullptr);
- V8_INLINE ~Source() = default;
-
- // Ownership of the CachedData or its buffers is *not* transferred to the
- // caller. The CachedData object is alive as long as the Source object is
- // alive.
- V8_INLINE const CachedData* GetCachedData() const;
-
- V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
-
- private:
- friend class ScriptCompiler;
-
- Local<String> source_string;
-
- // Origin information
- Local<Value> resource_name;
- int resource_line_offset;
- int resource_column_offset;
- ScriptOriginOptions resource_options;
- Local<Value> source_map_url;
- Local<PrimitiveArray> host_defined_options;
-
- // Cached data from a previous compilation (if a kConsume*Cache flag is
- // set), or newly generated cache data (if a kProduce*Cache flag is set)
- // after calling a compile method.
- std::unique_ptr<CachedData> cached_data;
- std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
- };
-
- /**
- * For streaming incomplete script data to V8. The embedder should implement a
- * subclass of this class.
- */
- class V8_EXPORT ExternalSourceStream {
- public:
- virtual ~ExternalSourceStream() = default;
-
- /**
- * V8 calls this to request the next chunk of data from the embedder. This
- * function will be called on a background thread, so it's OK to block and
- * wait for the data, if the embedder doesn't have data yet. Returns the
- * length of the data returned. When the data ends, GetMoreData should
- * return 0. Caller takes ownership of the data.
- *
- * When streaming UTF-8 data, V8 handles multi-byte characters split between
- * two data chunks, but doesn't handle multi-byte characters split between
- * more than two data chunks. The embedder can avoid this problem by always
- * returning at least 2 bytes of data.
- *
- * When streaming UTF-16 data, V8 does not handle characters split between
- * two data chunks. The embedder has to make sure that chunks have an even
- * length.
- *
- * If the embedder wants to cancel the streaming, they should make the next
- * GetMoreData call return 0. V8 will interpret it as end of data (and most
- * probably, parsing will fail). The streaming task will return as soon as
- * V8 has parsed the data it received so far.
- */
- virtual size_t GetMoreData(const uint8_t** src) = 0;
-
- /**
- * V8 calls this method to set a 'bookmark' at the current position in
- * the source stream, for the purpose of (maybe) later calling
- * ResetToBookmark. If ResetToBookmark is called later, then subsequent
- * calls to GetMoreData should return the same data as they did when
- * SetBookmark was called earlier.
- *
- * The embedder may return 'false' to indicate it cannot provide this
- * functionality.
- */
- virtual bool SetBookmark();
-
- /**
- * V8 calls this to return to a previously set bookmark.
- */
- virtual void ResetToBookmark();
- };
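-
- /**
- * Sketch of an embedder-defined stream (illustrative only; the class name is
- * made up): serves the whole source as a single chunk and then signals
- * end-of-data by returning 0. Assumes <cstring> and <string> are available.
- *
- * \code
- *   class OneChunkStream : public v8::ScriptCompiler::ExternalSourceStream {
- *    public:
- *     explicit OneChunkStream(std::string source)
- *         : source_(std::move(source)) {}
- *     size_t GetMoreData(const uint8_t** src) override {
- *       if (done_) return 0;  // End of data.
- *       done_ = true;
- *       uint8_t* copy = new uint8_t[source_.size()];
- *       std::memcpy(copy, source_.data(), source_.size());
- *       *src = copy;  // The caller takes ownership of this buffer.
- *       return source_.size();
- *     }
- *
- *    private:
- *     std::string source_;
- *     bool done_ = false;
- *   };
- * \endcode
- */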
-
- /**
- * Source code which can be streamed into V8 in pieces. It will be parsed
- * while streaming and compiled after parsing has completed. StreamedSource
- * must be kept alive while the streaming task is run (see ScriptStreamingTask
- * below).
- */
- class V8_EXPORT StreamedSource {
- public:
- enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
-
- StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
- Encoding encoding);
- ~StreamedSource();
-
- internal::ScriptStreamingData* impl() const { return impl_.get(); }
-
- // Prevent copying.
- StreamedSource(const StreamedSource&) = delete;
- StreamedSource& operator=(const StreamedSource&) = delete;
-
- private:
- std::unique_ptr<internal::ScriptStreamingData> impl_;
- };
-
- /**
- * A streaming task which the embedder must run on a background thread to
- * stream scripts into V8. Returned by ScriptCompiler::StartStreaming.
- */
- class V8_EXPORT ScriptStreamingTask final {
- public:
- void Run();
-
- private:
- friend class ScriptCompiler;
-
- explicit ScriptStreamingTask(internal::ScriptStreamingData* data)
- : data_(data) {}
-
- internal::ScriptStreamingData* data_;
- };
-
- /**
- * A task which the embedder must run on a background thread to
- * consume a V8 code cache. Returned by
- * ScriptCompiler::StartConsumingCodeCache.
- */
- class V8_EXPORT ConsumeCodeCacheTask final {
- public:
- ~ConsumeCodeCacheTask();
-
- void Run();
-
- private:
- friend class ScriptCompiler;
-
- explicit ConsumeCodeCacheTask(
- std::unique_ptr<internal::BackgroundDeserializeTask> impl);
-
- std::unique_ptr<internal::BackgroundDeserializeTask> impl_;
- };
-
- enum CompileOptions {
- kNoCompileOptions = 0,
- kConsumeCodeCache,
- kEagerCompile
- };
-
- /**
- * The reason for which we are not requesting or providing a code cache.
- */
- enum NoCacheReason {
- kNoCacheNoReason = 0,
- kNoCacheBecauseCachingDisabled,
- kNoCacheBecauseNoResource,
- kNoCacheBecauseInlineScript,
- kNoCacheBecauseModule,
- kNoCacheBecauseStreamingSource,
- kNoCacheBecauseInspector,
- kNoCacheBecauseScriptTooSmall,
- kNoCacheBecauseCacheTooCold,
- kNoCacheBecauseV8Extension,
- kNoCacheBecauseExtensionModule,
- kNoCacheBecausePacScript,
- kNoCacheBecauseInDocumentWrite,
- kNoCacheBecauseResourceWithNoCacheHandler,
- kNoCacheBecauseDeferredProduceCodeCache
- };
-
- /**
- * Compiles the specified script (context-independent).
- * Cached data as part of the source object can be optionally produced to be
- * consumed later to speed up compilation of identical source scripts.
- *
- * Note that when producing cached data, the source's cached data must be
- * NULL. When consuming cached data, the cached data must have been
- * produced by the same version of V8, and the embedder needs to ensure the
- * cached data is the correct one for the given script.
- *
- * \param source Script source code.
- * \return Compiled script object (context independent; for running it must be
- * bound to a context).
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Compiles the specified script (bound to current context).
- *
- * \param source Script source code.
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
- * using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when this function returns.
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Returns a task which streams script data into V8, or NULL if the script
- * cannot be streamed. The user is responsible for running the task on a
- * background thread and deleting it. When run, the task starts parsing the
- * script, and it will request data from the StreamedSource as needed. When
- * ScriptStreamingTask::Run exits, all data has been streamed and the script
- * can be compiled (see Compile below).
- *
- * This API allows starting the streaming with as little data as possible, and
- * the remaining data (for example, the ScriptOrigin) is passed to Compile.
- */
- static ScriptStreamingTask* StartStreaming(
- Isolate* isolate, StreamedSource* source,
- ScriptType type = ScriptType::kClassic);
-
- static ConsumeCodeCacheTask* StartConsumingCodeCache(
- Isolate* isolate, std::unique_ptr<CachedData> source);
-
- /**
- * Compiles a streamed script (bound to current context).
- *
- * This can only be called after the streaming has finished
- * (ScriptStreamingTask has been run). V8 doesn't construct the source string
- * during streaming, so the embedder needs to pass the full source here.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, StreamedSource* source,
- Local<String> full_source_string, const ScriptOrigin& origin);
-
- /**
- * Return a version tag for CachedData for the current V8 version & flags.
- *
- * This value is meant only for determining whether a previously generated
- * CachedData instance is still valid; the tag has no other meaning.
- *
- * Background: The data carried by CachedData may depend on the exact
- * V8 version number or current compiler flags. This means that when
- * persisting CachedData, the embedder must take care to not pass in
- * data from another V8 version, or the same version with different
- * features enabled.
- *
- * The easiest way to do so is to clear the embedder's cache on any
- * such change.
- *
- * Alternatively, this tag can be stored alongside the cached data and
- * compared when it is being used.
- */
- static uint32_t CachedDataVersionTag();
-
- /**
- * Compile an ES module, returning a Module that encapsulates
- * the compiled code.
- *
- * Corresponds to the ParseModule abstract operation in the
- * ECMAScript specification.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Compiles a streamed module script.
- *
- * This can only be called after the streaming has finished
- * (ScriptStreamingTask has been run). V8 doesn't construct the source string
- * during streaming, so the embedder needs to pass the full source here.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
- Local<Context> context, StreamedSource* v8_source,
- Local<String> full_source_string, const ScriptOrigin& origin);
-
- /**
- * Compile a function for a given context. This is equivalent to running
- *
- * with (obj) {
- * return function(args) { ... }
- * }
- *
- * It is possible to specify multiple context extensions (obj in the above
- * example).
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
- Local<Context> context, Source* source, size_t arguments_count,
- Local<String> arguments[], size_t context_extension_count,
- Local<Object> context_extensions[],
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason,
- Local<ScriptOrModule>* script_or_module_out = nullptr);
-
- /**
- * Creates and returns code cache for the specified unbound_script.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCache(Local<UnboundScript> unbound_script);
-
- /**
- * Creates and returns code cache for the specified unbound_module_script.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCache(
- Local<UnboundModuleScript> unbound_module_script);
-
- /**
- * Creates and returns code cache for the specified function that was
- * previously produced by CompileFunctionInContext.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCacheForFunction(Local<Function> function);
-
- private:
- static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
- Isolate* isolate, Source* source, CompileOptions options,
- NoCacheReason no_cache_reason);
-};
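-
-/**
- * Code cache usage sketch (illustrative only; assumes an initialized
- * v8::Isolate* |isolate|, a live HandleScope and <memory>): produce a cache
- * on the first compile, then consume it on a later compile of the same
- * source.
- *
- * \code
- *   void CompileTwice(v8::Isolate* isolate, v8::Local<v8::String> code,
- *                     const v8::ScriptOrigin& origin) {
- *     // First compilation: eager compile, then serialize a code cache.
- *     v8::ScriptCompiler::Source first(code, origin);
- *     v8::Local<v8::UnboundScript> unbound;
- *     if (!v8::ScriptCompiler::CompileUnboundScript(
- *              isolate, &first, v8::ScriptCompiler::kEagerCompile)
- *              .ToLocal(&unbound)) return;
- *     std::unique_ptr<v8::ScriptCompiler::CachedData> cache(
- *         v8::ScriptCompiler::CreateCodeCache(unbound));
- *     if (!cache) return;
- *
- *     // Later compilation of the same source: consume the cache. The Source
- *     // takes ownership of the CachedData passed to it.
- *     v8::ScriptCompiler::Source second(
- *         code, origin,
- *         new v8::ScriptCompiler::CachedData(cache->data, cache->length));
- *     v8::Local<v8::UnboundScript> unbound_again;
- *     if (v8::ScriptCompiler::CompileUnboundScript(
- *             isolate, &second, v8::ScriptCompiler::kConsumeCodeCache)
- *             .ToLocal(&unbound_again)) {
- *       // second.GetCachedData()->rejected tells whether the cache was used.
- *     }
- *   }
- * \endcode
- */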
-
-
-/**
- * An error message.
- */
-class V8_EXPORT Message {
- public:
- Local<String> Get() const;
-
- /**
- * Return the isolate to which the Message belongs.
- */
- Isolate* GetIsolate() const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
- Local<Context> context) const;
- V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
- Local<Context> context) const;
-
- /**
- * Returns the origin for the script from where the function causing the
- * error originates.
- */
- ScriptOrigin GetScriptOrigin() const;
-
- /**
- * Returns the resource name for the script from where the function causing
- * the error originates.
- */
- Local<Value> GetScriptResourceName() const;
-
- /**
- * Exception stack trace. By default stack traces are not captured for
- * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
- * changing this option.
- */
- Local<StackTrace> GetStackTrace() const;
-
- /**
- * Returns the number, 1-based, of the line where the error occurred.
- */
- V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
-
- /**
- * Returns the index within the script of the first character where
- * the error occurred.
- */
- int GetStartPosition() const;
-
- /**
- * Returns the index within the script of the last character where
- * the error occurred.
- */
- int GetEndPosition() const;
-
- /**
- * Returns the Wasm function index where the error occurred. Returns -1 if
- * message is not from a Wasm script.
- */
- int GetWasmFunctionIndex() const;
-
- /**
- * Returns the error level of the message.
- */
- int ErrorLevel() const;
-
- /**
- * Returns the index within the line of the first character where
- * the error occurred.
- */
- int GetStartColumn() const;
- V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
-
- /**
- * Returns the index within the line of the last character where
- * the error occurred.
- */
- int GetEndColumn() const;
- V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
-
- /**
- * Passes on the value set by the embedder when it fed the script from which
- * this Message was generated to V8.
- */
- bool IsSharedCrossOrigin() const;
- bool IsOpaque() const;
-
- // TODO(1245381): Print to a string instead of on a FILE.
- static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
-
- static const int kNoLineNumberInfo = 0;
- static const int kNoColumnInfo = 0;
- static const int kNoScriptIdInfo = 0;
- static const int kNoWasmFunctionIndexInfo = -1;
-};
-
-
-/**
- * Representation of a JavaScript stack trace. The information collected is a
- * snapshot of the execution stack and the information remains valid after
- * execution continues.
- */
-class V8_EXPORT StackTrace {
- public:
- /**
- * Flags that determine what information is captured for each
- * StackFrame when grabbing the current stack trace.
- * Note: these options are deprecated and we always collect all available
- * information (kDetailed).
- */
- enum StackTraceOptions {
- kLineNumber = 1,
- kColumnOffset = 1 << 1 | kLineNumber,
- kScriptName = 1 << 2,
- kFunctionName = 1 << 3,
- kIsEval = 1 << 4,
- kIsConstructor = 1 << 5,
- kScriptNameOrSourceURL = 1 << 6,
- kScriptId = 1 << 7,
- kExposeFramesAcrossSecurityOrigins = 1 << 8,
- kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
- kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
- };
-
- /**
- * Returns a StackFrame at a particular index.
- */
- Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
-
- /**
- * Returns the number of StackFrames.
- */
- int GetFrameCount() const;
-
- /**
- * Grab a snapshot of the current JavaScript execution stack.
- *
- * \param frame_limit The maximum number of stack frames we want to capture.
- * \param options Enumerates the set of things we will capture for each
- * StackFrame.
- */
- static Local<StackTrace> CurrentStackTrace(
- Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed);
-};
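-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate| with JavaScript currently executing and a live HandleScope):
- * captures up to ten frames and reads their line numbers.
- *
- * \code
- *   v8::Local<v8::StackTrace> trace =
- *       v8::StackTrace::CurrentStackTrace(isolate, 10);
- *   for (int i = 0; i < trace->GetFrameCount(); ++i) {
- *     v8::Local<v8::StackFrame> frame =
- *         trace->GetFrame(isolate, static_cast<uint32_t>(i));
- *     int line = frame->GetLineNumber();  // kNoLineNumberInfo if unknown.
- *   }
- * \endcode
- */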
-
-
-/**
- * A single JavaScript stack frame.
- */
-class V8_EXPORT StackFrame {
- public:
- /**
- * Returns the number, 1-based, of the line for the associated function call.
- * This method will return Message::kNoLineNumberInfo if it is unable to
- * retrieve the line number, or if kLineNumber was not passed as an option
- * when capturing the StackTrace.
- */
- int GetLineNumber() const;
-
- /**
- * Returns the 1-based column offset on the line for the associated function
- * call.
- * This method will return Message::kNoColumnInfo if it is unable to retrieve
- * the column number, or if kColumnOffset was not passed as an option when
- * capturing the StackTrace.
- */
- int GetColumn() const;
-
- /**
- * Returns the id of the script for the function for this StackFrame.
- * This method will return Message::kNoScriptIdInfo if it is unable to
- * retrieve the script id, or if kScriptId was not passed as an option when
- * capturing the StackTrace.
- */
- int GetScriptId() const;
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame.
- */
- Local<String> GetScriptName() const;
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame or sourceURL value if the script name
- * is undefined and its source ends with a //# sourceURL=... string or the
- * deprecated //@ sourceURL=... string.
- */
- Local<String> GetScriptNameOrSourceURL() const;
-
- /**
- * Returns the source of the script for the function for this StackFrame.
- */
- Local<String> GetScriptSource() const;
-
- /**
- * Returns the source mapping URL (if one is present) of the script for
- * the function for this StackFrame.
- */
- Local<String> GetScriptSourceMappingURL() const;
-
- /**
- * Returns the name of the function associated with this stack frame.
- */
- Local<String> GetFunctionName() const;
-
- /**
- * Returns whether or not the associated function is compiled via a call to
- * eval().
- */
- bool IsEval() const;
-
- /**
- * Returns whether or not the associated function is called as a
- * constructor via "new".
- */
- bool IsConstructor() const;
-
- /**
- * Returns whether or not the associated function is defined in wasm.
- */
- bool IsWasm() const;
-
- /**
- * Returns whether or not the associated function is defined by the user.
- */
- bool IsUserJavaScript() const;
-};
-
-
-// A StateTag represents a possible state of the VM.
-enum StateTag {
- JS,
- GC,
- PARSER,
- BYTECODE_COMPILER,
- COMPILER,
- OTHER,
- EXTERNAL,
- ATOMICS_WAIT,
- IDLE
-};
-
-// Holds the callee saved registers needed for the stack unwinder. It is the
-// empty struct if no registers are required. Implemented in
-// include/v8-unwinder-state.h.
-struct CalleeSavedRegisters;
-
-// A RegisterState represents the current state of registers used
-// by the sampling profiler API.
-struct V8_EXPORT RegisterState {
- RegisterState();
- ~RegisterState();
- RegisterState(const RegisterState& other);
- RegisterState& operator=(const RegisterState& other);
-
- void* pc; // Instruction pointer.
- void* sp; // Stack pointer.
- void* fp; // Frame pointer.
- void* lr; // Link register (or nullptr on platforms without a link register).
- // Callee saved registers (or null if no callee saved registers were stored)
- std::unique_ptr<CalleeSavedRegisters> callee_saved;
-};
-
-// The output structure filled up by GetStackSample API function.
-struct SampleInfo {
- size_t frames_count; // Number of frames collected.
- StateTag vm_state; // Current VM state.
- void* external_callback_entry; // External callback address if VM is
- // executing an external callback.
- void* context; // Incumbent native context address.
-};
-
-struct MemoryRange {
- const void* start = nullptr;
- size_t length_in_bytes = 0;
-};
-
-struct JSEntryStub {
- MemoryRange code;
-};
-
-struct JSEntryStubs {
- JSEntryStub js_entry_stub;
- JSEntryStub js_construct_entry_stub;
- JSEntryStub js_run_microtasks_entry_stub;
-};
-
-/**
- * A JSON Parser and Stringifier.
- */
-class V8_EXPORT JSON {
- public:
- /**
- * Tries to parse the string |json_string| and returns it as value if
- * successful.
- *
- * \param context The context in which to parse and create the value.
- * \param json_string The string to parse.
- * \return The corresponding value if successfully parsed.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
- Local<Context> context, Local<String> json_string);
-
- /**
- * Tries to stringify the JSON-serializable object |json_object| and returns
- * it as string if successful.
- *
- * \param json_object The JSON-serializable object to stringify.
- * \return The corresponding string if successfully stringified.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
- Local<Context> context, Local<Value> json_object,
- Local<String> gap = Local<String>());
-};
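-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate|, a live HandleScope and an entered v8::Local<v8::Context>
- * |context|): parses a JSON string and stringifies it again.
- *
- * \code
- *   v8::Local<v8::String> json =
- *       v8::String::NewFromUtf8Literal(isolate, "{\"answer\": 42}");
- *   v8::Local<v8::Value> parsed;
- *   v8::Local<v8::String> roundtrip;
- *   if (v8::JSON::Parse(context, json).ToLocal(&parsed) &&
- *       v8::JSON::Stringify(context, parsed).ToLocal(&roundtrip)) {
- *     // |roundtrip| holds "{\"answer\":42}".
- *   }
- * \endcode
- */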
-
-/**
- * Value serialization compatible with the HTML structured clone algorithm.
- * The format is backward-compatible (i.e. safe to store to disk).
- */
-class V8_EXPORT ValueSerializer {
- public:
- class V8_EXPORT Delegate {
- public:
- virtual ~Delegate() = default;
-
- /**
- * Handles the case where a DataCloneError would be thrown in the structured
- * clone spec. Other V8 embedders may throw some other appropriate exception
- * type.
- */
- virtual void ThrowDataCloneError(Local<String> message) = 0;
-
- /**
- * The embedder overrides this method to write some kind of host object, if
- * possible. If not, a suitable exception should be thrown and
- * Nothing<bool>() returned.
- */
- virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
-
- /**
- * Called when the ValueSerializer is going to serialize a
- * SharedArrayBuffer object. The embedder must return an ID for the
- * object, using the same ID if this SharedArrayBuffer has already been
- * serialized in this buffer. When deserializing, this ID will be passed to
- * ValueDeserializer::GetSharedArrayBufferFromId as |clone_id|.
- *
- * If the object cannot be serialized, an
- * exception should be thrown and Nothing<uint32_t>() returned.
- */
- virtual Maybe<uint32_t> GetSharedArrayBufferId(
- Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
-
- virtual Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmModuleObject> module);
- /**
- * Allocates memory for the buffer of at least the size provided. The actual
- * size (which may be greater or equal) is written to |actual_size|. If no
- * buffer has been allocated yet, nullptr will be provided.
- *
- * If the memory cannot be allocated, nullptr should be returned.
- * |actual_size| will be ignored. It is assumed that |old_buffer| is still
- * valid in this case and has not been modified.
- *
- * The default implementation uses the stdlib's `realloc()` function.
- */
- virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
- size_t* actual_size);
-
- /**
- * Frees a buffer allocated with |ReallocateBufferMemory|.
- *
- * The default implementation uses the stdlib's `free()` function.
- */
- virtual void FreeBufferMemory(void* buffer);
- };
-
- explicit ValueSerializer(Isolate* isolate);
- ValueSerializer(Isolate* isolate, Delegate* delegate);
- ~ValueSerializer();
-
- /**
- * Writes out a header, which includes the format version.
- */
- void WriteHeader();
-
- /**
- * Serializes a JavaScript value into the buffer.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> WriteValue(Local<Context> context,
- Local<Value> value);
-
- /**
- * Returns the stored data (allocated using the delegate's
- * ReallocateBufferMemory) and its size. This serializer should not be used
- * once the buffer is released. The contents are undefined if a previous write
- * has failed. Ownership of the buffer is transferred to the caller.
- */
- V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
-
- /**
- * Marks an ArrayBuffer as having its contents transferred out of band.
- * Pass the corresponding ArrayBuffer in the deserializing context to
- * ValueDeserializer::TransferArrayBuffer.
- */
- void TransferArrayBuffer(uint32_t transfer_id,
- Local<ArrayBuffer> array_buffer);
-
-
- /**
- * Indicate whether to treat ArrayBufferView objects as host objects,
- * i.e. pass them to Delegate::WriteHostObject. This should not be
- * called when no Delegate was passed.
- *
- * The default is not to treat ArrayBufferViews as host objects.
- */
- void SetTreatArrayBufferViewsAsHostObjects(bool mode);
-
- /**
- * Write raw data in various common formats to the buffer.
- * Note that integer types are written in base-128 varint format, not with a
- * binary copy. For use during an override of Delegate::WriteHostObject.
- */
- void WriteUint32(uint32_t value);
- void WriteUint64(uint64_t value);
- void WriteDouble(double value);
- void WriteRawBytes(const void* source, size_t length);
-
- ValueSerializer(const ValueSerializer&) = delete;
- void operator=(const ValueSerializer&) = delete;
-
- private:
- struct PrivateData;
- PrivateData* private_;
-};
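-
-/**
- * Usage sketch (illustrative only; assumes an initialized v8::Isolate*
- * |isolate|, an entered v8::Local<v8::Context> |context|, a
- * v8::Local<v8::Value> |value| and <cstdlib>): serializes a value with the
- * default delegate, whose buffer is allocated with realloc() and must be
- * freed by the caller.
- *
- * \code
- *   v8::ValueSerializer serializer(isolate);
- *   serializer.WriteHeader();
- *   if (serializer.WriteValue(context, value).FromMaybe(false)) {
- *     std::pair<uint8_t*, size_t> buffer = serializer.Release();
- *     // ... persist or transfer buffer.first / buffer.second ...
- *     free(buffer.first);
- *   }
- * \endcode
- */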
-
-/**
- * Deserializes values from data written with ValueSerializer, or a compatible
- * implementation.
- */
-class V8_EXPORT ValueDeserializer {
- public:
- class V8_EXPORT Delegate {
- public:
- virtual ~Delegate() = default;
-
- /**
- * The embedder overrides this method to read some kind of host object, if
- * possible. If not, a suitable exception should be thrown and
- * MaybeLocal<Object>() returned.
- */
- virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
-
- /**
- * Get a WasmModuleObject given a transfer_id previously provided
- * by ValueSerializer::GetWasmModuleTransferId
- */
- virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
- Isolate* isolate, uint32_t transfer_id);
-
- /**
- * Get a SharedArrayBuffer given a clone_id previously provided
- * by ValueSerializer::GetSharedArrayBufferId
- */
- virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
- Isolate* isolate, uint32_t clone_id);
- };
-
- ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
- ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size,
- Delegate* delegate);
- ~ValueDeserializer();
-
- /**
- * Reads and validates a header (including the format version).
- * May, for example, reject an invalid or unsupported wire format.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
-
- /**
- * Deserializes a JavaScript value from the buffer.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> ReadValue(Local<Context> context);
-
- /**
- * Accepts the array buffer corresponding to the one passed previously to
- * ValueSerializer::TransferArrayBuffer.
- */
- void TransferArrayBuffer(uint32_t transfer_id,
- Local<ArrayBuffer> array_buffer);
-
- /**
- * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
- * The id is not necessarily in the same namespace as unshared ArrayBuffer
- * objects.
- */
- void TransferSharedArrayBuffer(uint32_t id,
- Local<SharedArrayBuffer> shared_array_buffer);
-
- /**
- * Must be called before ReadHeader to enable support for reading the legacy
- * wire format (i.e., the format that predates this API being shipped).
- *
- * Don't use this unless you need to read data written by previous versions of
- * blink::ScriptValueSerializer.
- */
- void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
-
- /**
- * Reads the underlying wire format version. Mostly useful to legacy code
- * reading old wire format versions. Must be called after
- * ReadHeader.
- */
- uint32_t GetWireFormatVersion() const;
-
- /**
- * Reads raw data in various common formats from the buffer.
- * Note that integer types are read in base-128 varint format, not with a
- * binary copy. For use during an override of Delegate::ReadHostObject.
- */
- V8_WARN_UNUSED_RESULT bool ReadUint32(uint32_t* value);
- V8_WARN_UNUSED_RESULT bool ReadUint64(uint64_t* value);
- V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
- V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
-
- ValueDeserializer(const ValueDeserializer&) = delete;
- void operator=(const ValueDeserializer&) = delete;
-
- private:
- struct PrivateData;
- PrivateData* private_;
-};
-
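The matching read side, assuming `data` and `size` describe bytes previously produced by ValueSerializer::Release() and an existing `isolate` and `context`:

  v8::ValueDeserializer deserializer(isolate, data, size);
  if (deserializer.ReadHeader(context).FromMaybe(false)) {
    v8::Local<v8::Value> result;
    if (deserializer.ReadValue(context).ToLocal(&result)) {
      // `result` now holds the deserialized value.
    }
  }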
-
-// --- Value ---
-
-
-/**
- * The superclass of all JavaScript values and objects.
- */
-class V8_EXPORT Value : public Data {
- public:
- /**
- * Returns true if this value is the undefined value. See ECMA-262
- * 4.3.10.
- *
- * This is equivalent to `value === undefined` in JS.
- */
- V8_INLINE bool IsUndefined() const;
-
- /**
- * Returns true if this value is the null value. See ECMA-262
- * 4.3.11.
- *
- * This is equivalent to `value === null` in JS.
- */
- V8_INLINE bool IsNull() const;
-
- /**
- * Returns true if this value is either the null or the undefined value.
- * See ECMA-262 4.3.11 and 4.3.12.
- *
- * This is equivalent to `value == null` in JS.
- */
- V8_INLINE bool IsNullOrUndefined() const;
-
- /**
- * Returns true if this value is true.
- *
- * This is not the same as `BooleanValue()`. The latter performs a
- * conversion to boolean, i.e. the result of `Boolean(value)` in JS, whereas
- * this checks `value === true`.
- */
- bool IsTrue() const;
-
- /**
- * Returns true if this value is false.
- *
- * This is not the same as `!BooleanValue()`. The latter performs a
- * conversion to boolean, i.e. the result of `!Boolean(value)` in JS, whereas
- * this checks `value === false`.
- */
- bool IsFalse() const;
-
- /**
- * Returns true if this value is a symbol or a string.
- *
- * This is equivalent to
- * `typeof value === 'string' || typeof value === 'symbol'` in JS.
- */
- bool IsName() const;
-
- /**
- * Returns true if this value is an instance of the String type.
- * See ECMA-262 8.4.
- *
- * This is equivalent to `typeof value === 'string'` in JS.
- */
- V8_INLINE bool IsString() const;
-
- /**
- * Returns true if this value is a symbol.
- *
- * This is equivalent to `typeof value === 'symbol'` in JS.
- */
- bool IsSymbol() const;
-
- /**
- * Returns true if this value is a function.
- *
- * This is equivalent to `typeof value === 'function'` in JS.
- */
- bool IsFunction() const;
-
- /**
- * Returns true if this value is an array. Note that it will return false for
- * a Proxy for an array.
- */
- bool IsArray() const;
-
- /**
- * Returns true if this value is an object.
- */
- bool IsObject() const;
-
- /**
- * Returns true if this value is a bigint.
- *
- * This is equivalent to `typeof value === 'bigint'` in JS.
- */
- bool IsBigInt() const;
-
- /**
- * Returns true if this value is boolean.
- *
- * This is equivalent to `typeof value === 'boolean'` in JS.
- */
- bool IsBoolean() const;
-
- /**
- * Returns true if this value is a number.
- *
- * This is equivalent to `typeof value === 'number'` in JS.
- */
- bool IsNumber() const;
-
- /**
- * Returns true if this value is an `External` object.
- */
- bool IsExternal() const;
-
- /**
- * Returns true if this value is a 32-bit signed integer.
- */
- bool IsInt32() const;
-
- /**
- * Returns true if this value is a 32-bit unsigned integer.
- */
- bool IsUint32() const;
-
- /**
- * Returns true if this value is a Date.
- */
- bool IsDate() const;
-
- /**
- * Returns true if this value is an Arguments object.
- */
- bool IsArgumentsObject() const;
-
- /**
- * Returns true if this value is a BigInt object.
- */
- bool IsBigIntObject() const;
-
- /**
- * Returns true if this value is a Boolean object.
- */
- bool IsBooleanObject() const;
-
- /**
- * Returns true if this value is a Number object.
- */
- bool IsNumberObject() const;
-
- /**
- * Returns true if this value is a String object.
- */
- bool IsStringObject() const;
-
- /**
- * Returns true if this value is a Symbol object.
- */
- bool IsSymbolObject() const;
-
- /**
- * Returns true if this value is a NativeError.
- */
- bool IsNativeError() const;
-
- /**
- * Returns true if this value is a RegExp.
- */
- bool IsRegExp() const;
-
- /**
- * Returns true if this value is an async function.
- */
- bool IsAsyncFunction() const;
-
- /**
- * Returns true if this value is a Generator function.
- */
- bool IsGeneratorFunction() const;
-
- /**
- * Returns true if this value is a Generator object (iterator).
- */
- bool IsGeneratorObject() const;
-
- /**
- * Returns true if this value is a Promise.
- */
- bool IsPromise() const;
-
- /**
- * Returns true if this value is a Map.
- */
- bool IsMap() const;
-
- /**
- * Returns true if this value is a Set.
- */
- bool IsSet() const;
-
- /**
- * Returns true if this value is a Map Iterator.
- */
- bool IsMapIterator() const;
-
- /**
- * Returns true if this value is a Set Iterator.
- */
- bool IsSetIterator() const;
-
- /**
- * Returns true if this value is a WeakMap.
- */
- bool IsWeakMap() const;
-
- /**
- * Returns true if this value is a WeakSet.
- */
- bool IsWeakSet() const;
-
- /**
- * Returns true if this value is an ArrayBuffer.
- */
- bool IsArrayBuffer() const;
-
- /**
- * Returns true if this value is an ArrayBufferView.
- */
- bool IsArrayBufferView() const;
-
- /**
- * Returns true if this value is one of TypedArrays.
- */
- bool IsTypedArray() const;
-
- /**
- * Returns true if this value is an Uint8Array.
- */
- bool IsUint8Array() const;
-
- /**
- * Returns true if this value is an Uint8ClampedArray.
- */
- bool IsUint8ClampedArray() const;
-
- /**
- * Returns true if this value is an Int8Array.
- */
- bool IsInt8Array() const;
-
- /**
- * Returns true if this value is an Uint16Array.
- */
- bool IsUint16Array() const;
-
- /**
- * Returns true if this value is an Int16Array.
- */
- bool IsInt16Array() const;
-
- /**
- * Returns true if this value is an Uint32Array.
- */
- bool IsUint32Array() const;
-
- /**
- * Returns true if this value is an Int32Array.
- */
- bool IsInt32Array() const;
-
- /**
- * Returns true if this value is a Float32Array.
- */
- bool IsFloat32Array() const;
-
- /**
- * Returns true if this value is a Float64Array.
- */
- bool IsFloat64Array() const;
-
- /**
- * Returns true if this value is a BigInt64Array.
- */
- bool IsBigInt64Array() const;
-
- /**
- * Returns true if this value is a BigUint64Array.
- */
- bool IsBigUint64Array() const;
-
- /**
- * Returns true if this value is a DataView.
- */
- bool IsDataView() const;
-
- /**
- * Returns true if this value is a SharedArrayBuffer.
- */
- bool IsSharedArrayBuffer() const;
-
- /**
- * Returns true if this value is a JavaScript Proxy.
- */
- bool IsProxy() const;
-
- /**
- * Returns true if this value is a WasmMemoryObject.
- */
- bool IsWasmMemoryObject() const;
-
- /**
- * Returns true if this value is a WasmModuleObject.
- */
- bool IsWasmModuleObject() const;
-
- /**
- * Returns true if the value is a Module Namespace Object.
- */
- bool IsModuleNamespaceObject() const;
-
- /**
- * Perform the equivalent of `BigInt(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `String(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
- Local<Context> context) const;
- /**
- * Provide a string representation of this value usable for debugging.
- * This operation has no observable side effects and will succeed
- * unless e.g. execution is being terminated.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ToDetailString(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Object(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> ToObject(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to an integer. Negative values are rounded up, positive values are rounded
- * down. NaN is converted to 0. Infinite values yield undefined results.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Integer> ToInteger(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to an unsigned 32-bit integer by performing the steps in
- * https://tc39.es/ecma262/#sec-touint32.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToUint32(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to a signed 32-bit integer by performing the steps in
- * https://tc39.es/ecma262/#sec-toint32.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
-
- /**
- * Perform the equivalent of `Boolean(value)` in JS. This can never fail.
- */
- Local<Boolean> ToBoolean(Isolate* isolate) const;
-
- /**
- * Attempts to convert a string to an array index.
- * Returns an empty handle if the conversion fails.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
- Local<Context> context) const;
-
- /** Returns the equivalent of `ToBoolean()->Value()`. */
- bool BooleanValue(Isolate* isolate) const;
-
- /** Returns the equivalent of `ToNumber()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
- /** Returns the equivalent of `ToInteger()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
- Local<Context> context) const;
- /** Returns the equivalent of `ToUint32()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<uint32_t> Uint32Value(
- Local<Context> context) const;
- /** Returns the equivalent of `ToInt32()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
-
- /** JS == */
- V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
- Local<Value> that) const;
- bool StrictEquals(Local<Value> that) const;
- bool SameValue(Local<Value> that) const;
-
- template <class T> V8_INLINE static Value* Cast(T* value);
-
- Local<String> TypeOf(Isolate*);
-
- Maybe<bool> InstanceOf(Local<Context> context, Local<Object> object);
-
- private:
- V8_INLINE bool QuickIsUndefined() const;
- V8_INLINE bool QuickIsNull() const;
- V8_INLINE bool QuickIsNullOrUndefined() const;
- V8_INLINE bool QuickIsString() const;
- bool FullIsUndefined() const;
- bool FullIsNull() const;
- bool FullIsString() const;
-
- static void CheckCast(Data* that);
-};
-
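To illustrate how the predicates and conversions above are typically combined (an existing `context` and a Local<Value> `value` are assumed):

  if (value->IsString()) {
    v8::Local<v8::String> str;
    if (value->ToString(context).ToLocal(&str)) {
      // `str` is the string form of `value`.
    }
  } else if (value->IsNumber()) {
    double number = value->NumberValue(context).FromMaybe(0.0);
    // ...
  }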
-
-/**
- * The superclass of primitive values. See ECMA-262 4.3.2.
- */
-class V8_EXPORT Primitive : public Value { };
-
-
-/**
- * A primitive boolean value (ECMA-262, 4.3.14). Either the true
- * or false value.
- */
-class V8_EXPORT Boolean : public Primitive {
- public:
- bool Value() const;
- V8_INLINE static Boolean* Cast(v8::Data* data);
- V8_INLINE static Local<Boolean> New(Isolate* isolate, bool value);
-
- private:
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A superclass for symbols and strings.
- */
-class V8_EXPORT Name : public Primitive {
- public:
- /**
- * Returns the identity hash for this object. The current implementation
- * uses an inline property on the object to store the identity hash.
- *
- * The return value will never be 0. Also, it is not guaranteed to be
- * unique.
- */
- int GetIdentityHash();
-
- V8_INLINE static Name* Cast(Data* data);
-
- private:
- static void CheckCast(Data* that);
-};
-
-/**
- * A flag describing different modes of string creation.
- *
- * Aside from performance implications there are no differences between the two
- * creation modes.
- */
-enum class NewStringType {
- /**
- * Create a new string, always allocating new storage memory.
- */
- kNormal,
-
- /**
- * Acts as a hint that the string should be created in the
- * old generation heap space and be deduplicated if an identical string
- * already exists.
- */
- kInternalized
-};
-
-/**
- * A JavaScript string value (ECMA-262, 4.3.17).
- */
-class V8_EXPORT String : public Name {
- public:
- static constexpr int kMaxLength =
- internal::kApiSystemPointerSize == 4 ? (1 << 28) - 16 : (1 << 29) - 24;
-
- enum Encoding {
- UNKNOWN_ENCODING = 0x1,
- TWO_BYTE_ENCODING = 0x0,
- ONE_BYTE_ENCODING = 0x8
- };
- /**
- * Returns the number of characters (UTF-16 code units) in this string.
- */
- int Length() const;
-
- /**
- * Returns the number of bytes in the UTF-8 encoded
- * representation of this string.
- */
- int Utf8Length(Isolate* isolate) const;
-
- /**
- * Returns whether this string is known to contain only one-byte data,
- * i.e. ISO-8859-1 code points.
- * Does not read the string.
- * False negatives are possible.
- */
- bool IsOneByte() const;
-
- /**
- * Returns whether this string contains only one-byte data,
- * i.e. ISO-8859-1 code points.
- * Will read the entire string in some cases.
- */
- bool ContainsOnlyOneByte() const;
-
- /**
- * Write the contents of the string to an external buffer.
- * If no arguments are given, expects the buffer to be large
- * enough to hold the entire string and NULL terminator. Copies
- * the contents of the string and the NULL terminator into the
- * buffer.
- *
- * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
- * before the end of the buffer.
- *
- * Copies up to length characters into the output buffer.
- * Only null-terminates if there is enough space in the buffer.
- *
- * \param buffer The buffer into which the string will be copied.
- * \param start The starting position within the string at which
- * copying begins.
- * \param length The number of characters to copy from the string. For
- * WriteUtf8 the number of bytes in the buffer.
- * \param nchars_ref The number of characters written, can be NULL.
- * \param options Various options that might affect performance of this or
- * subsequent operations.
- * \return The number of characters copied to the buffer excluding the null
- * terminator. For WriteUtf8: The number of bytes copied to the buffer
- * including the null terminator (if written).
- */
- enum WriteOptions {
- NO_OPTIONS = 0,
- HINT_MANY_WRITES_EXPECTED = 1,
- NO_NULL_TERMINATION = 2,
- PRESERVE_ONE_BYTE_NULL = 4,
- // Used by WriteUtf8 to replace orphan surrogate code units with the
- // unicode replacement character. Needs to be set to guarantee valid UTF-8
- // output.
- REPLACE_INVALID_UTF8 = 8
- };
-
- // 16-bit character codes.
- int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
- int options = NO_OPTIONS) const;
- // One byte characters.
- int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
- int length = -1, int options = NO_OPTIONS) const;
- // UTF-8 encoded characters.
- int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
- int* nchars_ref = nullptr, int options = NO_OPTIONS) const;
-
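A sketch of copying a string into a UTF-8 buffer with the helpers above, assuming an existing `isolate`, a Local<String> `str`, and <vector> being available:

  int utf8_length = str->Utf8Length(isolate);
  std::vector<char> buffer(utf8_length + 1);
  str->WriteUtf8(isolate, buffer.data(), static_cast<int>(buffer.size()),
                 nullptr, v8::String::REPLACE_INVALID_UTF8);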
- /**
- * A zero length string.
- */
- V8_INLINE static Local<String> Empty(Isolate* isolate);
-
- /**
- * Returns true if the string is external.
- */
- bool IsExternal() const;
-
- /**
- * Returns true if the string is both external and two-byte.
- */
- bool IsExternalTwoByte() const;
-
- /**
- * Returns true if the string is both external and one-byte.
- */
- bool IsExternalOneByte() const;
-
- class V8_EXPORT ExternalStringResourceBase {
- public:
- virtual ~ExternalStringResourceBase() = default;
-
- /**
- * If a string is cacheable, the value returned by
- * ExternalStringResource::data() may be cached, otherwise it is not
- * expected to be stable beyond the current top-level task.
- */
- virtual bool IsCacheable() const { return true; }
-
- // Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
- void operator=(const ExternalStringResourceBase&) = delete;
-
- protected:
- ExternalStringResourceBase() = default;
-
- /**
- * Internally V8 will call this Dispose method when the external string
- * resource is no longer needed. The default implementation will use the
- * delete operator. This method can be overridden in subclasses to
- * control how allocated external string resources are disposed.
- */
- virtual void Dispose() { delete this; }
-
- /**
- * For a non-cacheable string, the value returned by
- * |ExternalStringResource::data()| has to be stable between |Lock()| and
- * |Unlock()|, that is, the string must behave as if |IsCacheable()| returned
- * true.
- *
- * These two functions must be thread-safe, and can be called from anywhere.
- * They also must handle lock depth, in the sense that each can be called
- * several times, from different threads, and unlocking should only happen
- * when the balance of Lock() and Unlock() calls is 0.
- */
- virtual void Lock() const {}
-
- /**
- * Unlocks the string.
- */
- virtual void Unlock() const {}
-
- private:
- friend class internal::ExternalString;
- friend class v8::String;
- friend class internal::ScopedExternalStringLock;
- };
-
- /**
- * An ExternalStringResource is a wrapper around a two-byte string
- * buffer that resides outside V8's heap. Implement an
- * ExternalStringResource to manage the life cycle of the underlying
- * buffer. Note that the string data must be immutable.
- */
- class V8_EXPORT ExternalStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- ~ExternalStringResource() override = default;
-
- /**
- * The string data from the underlying buffer. If the resource is cacheable
- * then data() must return the same value for all invocations.
- */
- virtual const uint16_t* data() const = 0;
-
- /**
- * The length of the string. That is, the number of two-byte characters.
- */
- virtual size_t length() const = 0;
-
- /**
- * Returns the cached data from the underlying buffer. This method can be
- * called only for cacheable resources (i.e. IsCacheable() == true) and only
- * after UpdateDataCache() was called.
- */
- const uint16_t* cached_data() const {
- CheckCachedDataInvariants();
- return cached_data_;
- }
-
- /**
- * Update {cached_data_} with the data from the underlying buffer. This can
- * be called only for cacheable resources.
- */
- void UpdateDataCache();
-
- protected:
- ExternalStringResource() = default;
-
- private:
- void CheckCachedDataInvariants() const;
-
- const uint16_t* cached_data_ = nullptr;
- };
-
- /**
- * An ExternalOneByteStringResource is a wrapper around a one-byte
- * string buffer that resides outside V8's heap. Implement an
- * ExternalOneByteStringResource to manage the life cycle of the
- * underlying buffer. Note that the string data must be immutable and
- * must be Latin-1 rather than UTF-8; UTF-8 would require special
- * treatment internally in the engine and does not allow efficient
- * indexing. Use one of the String::NewFrom* factories or convert to
- * two-byte data for non-Latin-1 content.
- */
-
- class V8_EXPORT ExternalOneByteStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- ~ExternalOneByteStringResource() override = default;
-
- /**
- * The string data from the underlying buffer. If the resource is cacheable
- * then data() must return the same value for all invocations.
- */
- virtual const char* data() const = 0;
-
- /** The number of Latin-1 characters in the string. */
- virtual size_t length() const = 0;
-
- /**
- * Returns the cached data from the underlying buffer. If the resource is
- * uncacheable or if UpdateDataCache() was not called before, the
- * behaviour is undefined.
- */
- const char* cached_data() const {
- CheckCachedDataInvariants();
- return cached_data_;
- }
-
- /**
- * Update {cached_data_} with the data from the underlying buffer. This can
- * be called only for cacheable resources.
- */
- void UpdateDataCache();
-
- protected:
- ExternalOneByteStringResource() = default;
-
- private:
- void CheckCachedDataInvariants() const;
-
- const char* cached_data_ = nullptr;
- };
-
- /**
- * If the string is an external string, return the ExternalStringResourceBase
- * regardless of the encoding, otherwise return NULL. The encoding of the
- * string is returned in encoding_out.
- */
- V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const;
-
- /**
- * Get the ExternalStringResource for an external string. Returns
- * NULL if IsExternal() doesn't return true.
- */
- V8_INLINE ExternalStringResource* GetExternalStringResource() const;
-
- /**
- * Get the ExternalOneByteStringResource for an external one-byte string.
- * Returns NULL if IsExternalOneByte() doesn't return true.
- */
- const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
-
- V8_INLINE static String* Cast(v8::Data* data);
-
- /**
- * Allocates a new string from a UTF-8 literal. This is equivalent to calling
- * String::NewFromUtf8(isolate, "...").ToLocalChecked(), but without the check
- * overhead.
- *
- * When called on a string literal containing '\0', the inferred length is the
- * length of the input array minus 1 (for the final '\0') and not the value
- * returned by strlen.
- **/
- template <int N>
- static V8_WARN_UNUSED_RESULT Local<String> NewFromUtf8Literal(
- Isolate* isolate, const char (&literal)[N],
- NewStringType type = NewStringType::kNormal) {
- static_assert(N <= kMaxLength, "String is too long");
- return NewFromUtf8Literal(isolate, literal, type, N - 1);
- }
-
- /** Allocates a new string from UTF-8 data. Only returns an empty value when
- * length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
- Isolate* isolate, const char* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /** Allocates a new string from Latin-1 data. Only returns an empty value
- * when length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
- Isolate* isolate, const uint8_t* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /** Allocates a new string from UTF-16 data. Only returns an empty value when
- * length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromTwoByte(
- Isolate* isolate, const uint16_t* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /**
- * Creates a new string by concatenating the left and the right strings
- * passed in as parameters.
- */
- static Local<String> Concat(Isolate* isolate, Local<String> left,
- Local<String> right);
-
- /**
- * Creates a new external string using the data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
- Isolate* isolate, ExternalStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents need to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- bool MakeExternal(ExternalStringResource* resource);
-
- /**
- * Creates a new external string using the one-byte data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalOneByte(
- Isolate* isolate, ExternalOneByteStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents need to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- bool MakeExternal(ExternalOneByteStringResource* resource);
-
- /**
- * Returns true if this string can be made external.
- */
- bool CanMakeExternal() const;
-
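As a sketch of the one-byte external-string contract above, a hypothetical resource backed by a static Latin-1 buffer (the class name is illustrative; an existing `isolate` is assumed):

  class StaticResource : public v8::String::ExternalOneByteStringResource {
   public:
    StaticResource(const char* data, size_t length)
        : data_(data), length_(length) {}
    const char* data() const override { return data_; }
    size_t length() const override { return length_; }
    // The default Dispose() deletes this object; the static buffer itself
    // is left untouched.
   private:
    const char* data_;
    size_t length_;
  };

  v8::MaybeLocal<v8::String> s = v8::String::NewExternalOneByte(
      isolate, new StaticResource("hello", 5));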
- /**
- * Returns true if the strings' values are equal. Same as JS ==/===.
- */
- bool StringEquals(Local<String> str) const;
-
- /**
- * Converts an object to a UTF-8-encoded character array. Useful if
- * you want to print the object. If conversion to a string fails
- * (e.g. due to an exception in the toString() method of the object)
- * then the length() method returns 0 and the * operator returns
- * NULL.
- */
- class V8_EXPORT Utf8Value {
- public:
- Utf8Value(Isolate* isolate, Local<v8::Value> obj);
- ~Utf8Value();
- char* operator*() { return str_; }
- const char* operator*() const { return str_; }
- int length() const { return length_; }
-
- // Disallow copying and assigning.
- Utf8Value(const Utf8Value&) = delete;
- void operator=(const Utf8Value&) = delete;
-
- private:
- char* str_;
- int length_;
- };
-
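Typical diagnostic use of Utf8Value, assuming an existing `isolate`, a Local<Value> `value`, and <cstdio> for printf():

  v8::String::Utf8Value utf8(isolate, value);
  printf("%s\n", *utf8 ? *utf8 : "<string conversion failed>");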
- /**
- * Converts an object to a two-byte (UTF-16-encoded) string.
- * If conversion to a string fails (e.g. due to an exception in the toString()
- * method of the object) then the length() method returns 0 and the * operator
- * returns NULL.
- */
- class V8_EXPORT Value {
- public:
- Value(Isolate* isolate, Local<v8::Value> obj);
- ~Value();
- uint16_t* operator*() { return str_; }
- const uint16_t* operator*() const { return str_; }
- int length() const { return length_; }
-
- // Disallow copying and assigning.
- Value(const Value&) = delete;
- void operator=(const Value&) = delete;
-
- private:
- uint16_t* str_;
- int length_;
- };
-
- private:
- void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
- Encoding encoding) const;
- void VerifyExternalStringResource(ExternalStringResource* val) const;
- ExternalStringResource* GetExternalStringResourceSlow() const;
- ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
- String::Encoding* encoding_out) const;
-
- static Local<v8::String> NewFromUtf8Literal(Isolate* isolate,
- const char* literal,
- NewStringType type, int length);
-
- static void CheckCast(v8::Data* that);
-};
-
-// Zero-length string specialization (templated string size includes
-// terminator).
-template <>
-inline V8_WARN_UNUSED_RESULT Local<String> String::NewFromUtf8Literal(
- Isolate* isolate, const char (&literal)[1], NewStringType type) {
- return String::Empty(isolate);
-}
-
-/**
- * A JavaScript symbol (ECMA-262 edition 6)
- */
-class V8_EXPORT Symbol : public Name {
- public:
- /**
- * Returns the description string of the symbol, or undefined if none.
- */
- V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
- Local<Value> Description() const;
- Local<Value> Description(Isolate* isolate) const;
-
- /**
- * Create a symbol. If description is not empty, it will be used as the
- * description.
- */
- static Local<Symbol> New(Isolate* isolate,
- Local<String> description = Local<String>());
-
- /**
- * Access global symbol registry.
- * Note that symbols created this way are never collected, so
- * they should only be used for statically fixed properties.
- * Also, there is only one global name space for the descriptions used as
- * keys.
- * To minimize the potential for clashes, use qualified names as keys.
- */
- static Local<Symbol> For(Isolate* isolate, Local<String> description);
-
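A sketch of the qualified-key convention suggested above; "mylib.feature" is a hypothetical key and an existing `isolate` is assumed:

  v8::Local<v8::Symbol> sym = v8::Symbol::For(
      isolate, v8::String::NewFromUtf8Literal(isolate, "mylib.feature"));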
- /**
- * Retrieve a global symbol. Similar to |For|, but using a separate
- * registry that is not accessible by (and cannot clash with) JavaScript code.
- */
- static Local<Symbol> ForApi(Isolate* isolate, Local<String> description);
-
- // Well-known symbols
- static Local<Symbol> GetAsyncIterator(Isolate* isolate);
- static Local<Symbol> GetHasInstance(Isolate* isolate);
- static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
- static Local<Symbol> GetIterator(Isolate* isolate);
- static Local<Symbol> GetMatch(Isolate* isolate);
- static Local<Symbol> GetReplace(Isolate* isolate);
- static Local<Symbol> GetSearch(Isolate* isolate);
- static Local<Symbol> GetSplit(Isolate* isolate);
- static Local<Symbol> GetToPrimitive(Isolate* isolate);
- static Local<Symbol> GetToStringTag(Isolate* isolate);
- static Local<Symbol> GetUnscopables(Isolate* isolate);
-
- V8_INLINE static Symbol* Cast(Data* data);
-
- private:
- Symbol();
- static void CheckCast(Data* that);
-};
-
-
-/**
- * A private symbol
- *
- * This is an experimental feature. Use at your own risk.
- */
-class V8_EXPORT Private : public Data {
- public:
- /**
- * Returns the print name string of the private symbol, or undefined if none.
- */
- Local<Value> Name() const;
-
- /**
- * Create a private symbol. If name is not empty, it will be the description.
- */
- static Local<Private> New(Isolate* isolate,
- Local<String> name = Local<String>());
-
- /**
- * Retrieve a global private symbol. If a symbol with this name has not
- * been retrieved in the same isolate before, it is created.
- * Note that private symbols created this way are never collected, so
- * they should only be used for statically fixed properties.
- * Also, there is only one global name space for the names used as keys.
- * To minimize the potential for clashes, use qualified names as keys,
- * e.g., "Class#property".
- */
- static Local<Private> ForApi(Isolate* isolate, Local<String> name);
-
- V8_INLINE static Private* Cast(Data* data);
-
- private:
- Private();
-
- static void CheckCast(Data* that);
-};
-
-
-/**
- * A JavaScript number value (ECMA-262, 4.3.20)
- */
-class V8_EXPORT Number : public Primitive {
- public:
- double Value() const;
- static Local<Number> New(Isolate* isolate, double value);
- V8_INLINE static Number* Cast(v8::Data* data);
-
- private:
- Number();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a signed integer.
- */
-class V8_EXPORT Integer : public Number {
- public:
- static Local<Integer> New(Isolate* isolate, int32_t value);
- static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
- int64_t Value() const;
- V8_INLINE static Integer* Cast(v8::Data* data);
-
- private:
- Integer();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a 32-bit signed integer.
- */
-class V8_EXPORT Int32 : public Integer {
- public:
- int32_t Value() const;
- V8_INLINE static Int32* Cast(v8::Data* data);
-
- private:
- Int32();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a 32-bit unsigned integer.
- */
-class V8_EXPORT Uint32 : public Integer {
- public:
- uint32_t Value() const;
- V8_INLINE static Uint32* Cast(v8::Data* data);
-
- private:
- Uint32();
- static void CheckCast(v8::Data* that);
-};
-
-/**
- * A JavaScript BigInt value (https://tc39.github.io/proposal-bigint)
- */
-class V8_EXPORT BigInt : public Primitive {
- public:
- static Local<BigInt> New(Isolate* isolate, int64_t value);
- static Local<BigInt> NewFromUnsigned(Isolate* isolate, uint64_t value);
- /**
- * Creates a new BigInt object using a specified sign bit and a
- * specified list of digits/words.
- * The resulting number is calculated as:
- *
- * (-1)^sign_bit * (words[0] * (2^64)^0 + words[1] * (2^64)^1 + ...)
- */
- static MaybeLocal<BigInt> NewFromWords(Local<Context> context, int sign_bit,
- int word_count, const uint64_t* words);
-
- /**
- * Returns the value of this BigInt as an unsigned 64-bit integer.
- * If `lossless` is provided, it is set to `true` if the conversion was exact
- * and to `false` if the value was truncated or wrapped around; in particular,
- * it is set to `false` if this BigInt is negative.
- */
- uint64_t Uint64Value(bool* lossless = nullptr) const;
-
- /**
- * Returns the value of this BigInt as a signed 64-bit integer.
- * If `lossless` is provided, it is set to `true` if the conversion was exact
- * and to `false` if this BigInt was truncated.
- */
- int64_t Int64Value(bool* lossless = nullptr) const;
-
- /**
- * Returns the number of 64-bit words needed to store the result of
- * ToWordsArray().
- */
- int WordCount() const;
-
- /**
- * Writes the contents of this BigInt to a specified memory location.
- * `sign_bit` must be provided and will be set to 1 if this BigInt is
- * negative.
- * `*word_count` has to be initialized to the length of the `words` array.
- * Upon return, it will be set to the actual number of words that would
- * be needed to store this BigInt (i.e. the return value of `WordCount()`).
- */
- void ToWordsArray(int* sign_bit, int* word_count, uint64_t* words) const;
-
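The two-pass pattern implied above, assuming a Local<BigInt> `big` and <vector> being available:

  int sign_bit = 0;
  int word_count = big->WordCount();
  std::vector<uint64_t> words(word_count);
  big->ToWordsArray(&sign_bit, &word_count, words.data());
  // `sign_bit`, `word_count` and `words` now describe the BigInt exactly.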
- V8_INLINE static BigInt* Cast(v8::Data* data);
-
- private:
- BigInt();
- static void CheckCast(v8::Data* that);
-};
-
-/**
- * PropertyAttribute.
- */
-enum PropertyAttribute {
- /** None. **/
- None = 0,
- /** ReadOnly, i.e., not writable. **/
- ReadOnly = 1 << 0,
- /** DontEnum, i.e., not enumerable. **/
- DontEnum = 1 << 1,
- /** DontDelete, i.e., not configurable. **/
- DontDelete = 1 << 2
-};
-
-/**
- * Accessor[Getter|Setter] are used as callback functions when
- * setting|getting a particular property. See Object and ObjectTemplate's
- * method SetAccessor.
- */
-using AccessorGetterCallback =
- void (*)(Local<String> property, const PropertyCallbackInfo<Value>& info);
-using AccessorNameGetterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-using AccessorSetterCallback = void (*)(Local<String> property,
- Local<Value> value,
- const PropertyCallbackInfo<void>& info);
-using AccessorNameSetterCallback =
- void (*)(Local<Name> property, Local<Value> value,
- const PropertyCallbackInfo<void>& info);
-
-/**
- * Access control specifications.
- *
- * Some accessors should be accessible across contexts. These
- * accessors have an explicit access control parameter which specifies
- * the kind of cross-context access that should be allowed.
- *
- * TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused.
- */
-enum AccessControl {
- DEFAULT = 0,
- ALL_CAN_READ = 1,
- ALL_CAN_WRITE = 1 << 1,
- PROHIBITS_OVERWRITING = 1 << 2
-};
-
-/**
- * Property filter bits. They can be or'ed to build a composite filter.
- */
-enum PropertyFilter {
- ALL_PROPERTIES = 0,
- ONLY_WRITABLE = 1,
- ONLY_ENUMERABLE = 2,
- ONLY_CONFIGURABLE = 4,
- SKIP_STRINGS = 8,
- SKIP_SYMBOLS = 16
-};
-
-/**
- * Options for marking whether callbacks may trigger JS-observable side effects.
- * Side-effect-free callbacks are allowlisted during debug evaluation with
- * throwOnSideEffect. It applies when calling a Function, FunctionTemplate,
- * or an Accessor callback. For Interceptors, please see
- * PropertyHandlerFlags::kHasNoSideEffect.
- * Callbacks that only cause side effects to the receiver are allowlisted if
- * invoked on receiver objects that are created within the same debug-evaluate
- * call, as these objects are temporary and the side effect does not escape.
- */
-enum class SideEffectType {
- kHasSideEffect,
- kHasNoSideEffect,
- kHasSideEffectToReceiver
-};
-
-/**
- * Keys/Properties filter enums:
- *
- * KeyCollectionMode limits the range of collected properties. kOwnOnly limits
- * the collected properties to the given Object only. kIncludePrototypes will
- * include all keys of the object's prototype chain as well.
- */
-enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
-
-/**
- * kIncludeIndices allows for integer indices to be collected, while
- * kSkipIndices will exclude integer indices from being collected.
- */
-enum class IndexFilter { kIncludeIndices, kSkipIndices };
-
-/**
- * kConvertToString will convert integer indices to strings.
- * kKeepNumbers will return numbers for integer indices.
- */
-enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers };
-
-/**
- * Integrity level for objects.
- */
-enum class IntegrityLevel { kFrozen, kSealed };
-
-/**
- * A JavaScript object (ECMA-262, 4.3.3)
- */
-class V8_EXPORT Object : public Value {
- public:
- /**
- * Set only returns Just(true) or an empty Maybe, so if the call should never
- * fail, use result.Check().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
- Local<Value> key, Local<Value> value);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
- Local<Value> value);
-
- // Implements CreateDataProperty (ECMA-262, 7.3.4).
- //
- // Defines a configurable, writable, enumerable property with the given value
- // on the object unless the property already exists and is not configurable
- // or the object is not extensible.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
- Local<Name> key,
- Local<Value> value);
- V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
- uint32_t index,
- Local<Value> value);
-
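To contrast the two call styles, a sketch assuming an existing `context`, a Local<Object> `obj`, a Local<Name> `key` and a Local<Value> `value`:

  obj->Set(context, key, value).Check();                 // may run setters/interceptors
  obj->CreateDataProperty(context, key, value).Check();  // plain data property only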
- // Implements DefineOwnProperty.
- //
- // In general, CreateDataProperty will be faster; however, it does not allow
- // specifying attributes.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
- Local<Context> context, Local<Name> key, Local<Value> value,
- PropertyAttribute attributes = None);
-
- // Implements Object.DefineProperty(O, P, Attributes), see Ecma-262 19.1.2.4.
- //
- // The defineProperty function is used to add an own property or
- // update the attributes of an existing own property of an object.
- //
- // Both data and accessor descriptors can be used.
- //
- // In general, CreateDataProperty is faster; however, it does not allow
- // specifying attributes or an accessor descriptor.
- //
- // The PropertyDescriptor can change when redefining a property.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
- Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- uint32_t index);
-
- /**
- * Gets the property attributes of a property which can be None or
- * any combination of ReadOnly, DontEnum and DontDelete. Returns
- * None when the property doesn't exist.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
- Local<Context> context, Local<Value> key);
-
- /**
- * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
- Local<Context> context, Local<Name> key);
-
- /**
- * Object::Has() calls the abstract operation HasProperty(O, P) described
- * in ECMA-262, 7.3.10. Has() returns true if the object has the property,
- * either own or on the prototype chain.
- * Interceptors, i.e., PropertyQueryCallbacks, are called if present.
- *
- * Has() has the same side effects as JavaScript's `variable in object`.
- * For example, calling Has() on a revoked proxy will throw an exception.
- *
- * \note Has() converts the key to a name, which possibly calls back into
- * JavaScript.
- *
- * See also v8::Object::HasOwnProperty() and
- * v8::Object::HasRealNamedProperty().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- uint32_t index);
-
- /**
- * Note: SideEffectType affects the getter only, not the setter.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- MaybeLocal<Value> data = MaybeLocal<Value>(),
- AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
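A sketch of installing a native getter via SetAccessor; `NameGetter` is a hypothetical callback, and an existing `isolate`, `context` and Local<Object> `obj` are assumed:

  void NameGetter(v8::Local<v8::Name> property,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().Set(
        v8::String::NewFromUtf8Literal(info.GetIsolate(), "example"));
  }

  // Later, with `obj` in scope:
  obj->SetAccessor(context,
                   v8::String::NewFromUtf8Literal(isolate, "name"),
                   NameGetter)
      .Check();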
- void SetAccessorProperty(Local<Name> name, Local<Function> getter,
- Local<Function> setter = Local<Function>(),
- PropertyAttribute attribute = None,
- AccessControl settings = DEFAULT);
-
- /**
- * Sets a native data property like Template::SetNativeDataProperty, but
- * this method sets on this object directly.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetNativeDataProperty(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Attempts to create a property with the given name which behaves like a data
- * property, except that the provided getter is invoked (and provided with the
- * data value) to supply its value the first time it is read. After the
- * property is accessed once, it is replaced with an ordinary data property.
- *
- * Analogous to Template::SetLazyDataProperty.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
- PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Functionality for private properties.
- * This is an experimental feature, use at your own risk.
- * Note: Private properties are not inherited. Do not rely on this, since it
- * may change.
- */
- Maybe<bool> HasPrivate(Local<Context> context, Local<Private> key);
- Maybe<bool> SetPrivate(Local<Context> context, Local<Private> key,
- Local<Value> value);
- Maybe<bool> DeletePrivate(Local<Context> context, Local<Private> key);
- MaybeLocal<Value> GetPrivate(Local<Context> context, Local<Private> key);
-
- /**
- * Returns an array containing the names of the enumerable properties
- * of this object, including properties from prototype objects. The
- * array returned by this method contains the same values as would
- * be enumerated by a for-in statement over this object.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
- Local<Context> context);
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
- Local<Context> context, KeyCollectionMode mode,
- PropertyFilter property_filter, IndexFilter index_filter,
- KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
-
- /**
- * This function has the same functionality as GetPropertyNames but
- * the returned array doesn't contain the names of properties from
- * prototype objects.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
- Local<Context> context);
-
- /**
- * Returns an array containing the names of the filtered properties
- * of this object, including properties from prototype objects. The
- * array returned by this method contains the same values as would
- * be enumerated by a for-in statement over this object.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
- Local<Context> context, PropertyFilter filter,
- KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
-
- /**
- * Get the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- Local<Value> GetPrototype();
-
- /**
- * Set the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
- Local<Value> prototype);
-
- /**
- * Finds an instance of the given function template in the prototype
- * chain.
- */
- Local<Object> FindInstanceInPrototypeChain(Local<FunctionTemplate> tmpl);
-
- /**
- * Call builtin Object.prototype.toString on this object.
- * This is different from Value::ToString() that may call
- * user-defined toString function. This one does not.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
- Local<Context> context);
-
- /**
- * Returns the name of the function invoked as a constructor for this object.
- */
- Local<String> GetConstructorName();
-
- /**
- * Sets the integrity level of the object.
- */
- Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
-
- /** Gets the number of internal fields for this Object. */
- int InternalFieldCount() const;
-
- /** Same as above, but works for PersistentBase. */
- V8_INLINE static int InternalFieldCount(
- const PersistentBase<Object>& object) {
- return object.val_->InternalFieldCount();
- }
-
- /** Same as above, but works for BasicTracedReference. */
- V8_INLINE static int InternalFieldCount(
- const BasicTracedReference<Object>& object) {
- return object->InternalFieldCount();
- }
-
- /** Gets the value from an internal field. */
- V8_INLINE Local<Value> GetInternalField(int index);
-
- /** Sets the value in an internal field. */
- void SetInternalField(int index, Local<Value> value);
-
- /**
- * Gets a 2-byte-aligned native pointer from an internal field. This field
- * must have been set by SetAlignedPointerInInternalField; everything else
- * leads to undefined behavior.
- */
- V8_INLINE void* GetAlignedPointerFromInternalField(int index);
-
- /** Same as above, but works for PersistentBase. */
- V8_INLINE static void* GetAlignedPointerFromInternalField(
- const PersistentBase<Object>& object, int index) {
- return object.val_->GetAlignedPointerFromInternalField(index);
- }
-
- /** Same as above, but works for TracedGlobal. */
- V8_INLINE static void* GetAlignedPointerFromInternalField(
- const BasicTracedReference<Object>& object, int index) {
- return object->GetAlignedPointerFromInternalField(index);
- }
-
- /**
- * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
- * a field, GetAlignedPointerFromInternalField must be used; everything else
- * leads to undefined behavior.
- */
- void SetAlignedPointerInInternalField(int index, void* value);
- void SetAlignedPointerInInternalFields(int argc, int indices[],
- void* values[]);
-
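The aligned-pointer round trip, assuming `obj` was created from a template with at least one internal field and `data` points to 2-byte-aligned memory:

  obj->SetAlignedPointerInInternalField(0, data);
  void* raw = obj->GetAlignedPointerFromInternalField(0);
  // `raw` equals `data`; any other pairing of setter and getter is undefined.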
- /**
- * HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
- *
- * See also v8::Object::Has() and v8::Object::HasRealNamedProperty().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
- Local<Name> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
- uint32_t index);
- /**
- * Use HasRealNamedProperty() if you want to check if an object has an own
- * property without causing side effects, i.e., without calling interceptors.
- *
- * This function is similar to v8::Object::HasOwnProperty(), but it does not
- * call interceptors.
- *
- * \note Consider using non-masking interceptors, i.e., the interceptors are
- * not called if the receiver has the real named property. See
- * `v8::PropertyHandlerFlags::kNonMasking`.
- *
- * See also v8::Object::Has().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
- Local<Name> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
- Local<Context> context, uint32_t index);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
- Local<Context> context, Local<Name> key);
-
- /**
- * If result.IsEmpty() no real property was located in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
- Local<Context> context, Local<Name> key);
-
- /**
- * Gets the property attributes of a real property in the prototype chain,
- * which can be None or any combination of ReadOnly, DontEnum and DontDelete.
- * Interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute>
- GetRealNamedPropertyAttributesInPrototypeChain(Local<Context> context,
- Local<Name> key);
-
- /**
- * If result.IsEmpty() no real property was located on the object or
- * in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
- Local<Context> context, Local<Name> key);
-
- /**
- * Gets the property attributes of a real property which can be
- * None or any combination of ReadOnly, DontEnum and DontDelete.
- * Interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
- Local<Context> context, Local<Name> key);
-
- /** Tests for a named lookup interceptor.*/
- bool HasNamedLookupInterceptor() const;
-
- /** Tests for an index lookup interceptor.*/
- bool HasIndexedLookupInterceptor() const;
-
- /**
- * Returns the identity hash for this object. The current implementation
- * uses a hidden property on the object to store the identity hash.
- *
- * The return value will never be 0. Also, it is not guaranteed to be
- * unique.
- */
- int GetIdentityHash();
-
- /**
- * Clone this object with a fast but shallow copy. The clone's properties
- * will reference the same values as the original object's.
- */
- // TODO(dcarney): take an isolate and optionally bail out?
- Local<Object> Clone();
-
- /**
- * Returns the context in which the object was created.
- */
- // TODO(chromium:1166077): Mark as deprecated once users are updated.
- V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
- Local<Context> CreationContext();
- MaybeLocal<Context> GetCreationContext();
-
- /** Same as above, but works for Persistents */
- // TODO(chromium:1166077): Mark as deprecated once users are updated.
- V8_DEPRECATE_SOON(
- "Use MaybeLocal<Context> GetCreationContext(const "
- "PersistentBase<Object>& object)")
- static Local<Context> CreationContext(const PersistentBase<Object>& object);
- V8_INLINE static MaybeLocal<Context> GetCreationContext(
- const PersistentBase<Object>& object) {
- return object.val_->GetCreationContext();
- }
-
- /**
- * Checks whether a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- * When an Object is callable this method returns true.
- */
- bool IsCallable() const;
-
- /**
- * True if this object is a constructor.
- */
- bool IsConstructor() const;
-
- /**
- * True if this object can carry information relevant to the embedder in its
- * embedder fields, false otherwise. This is generally true for objects
- * constructed through function templates but also holds for other types where
- * V8 automatically adds internal fields at compile time, e.g.
- * v8::ArrayBuffer.
- */
- bool IsApiWrapper() const;
-
- /**
- * True if this object was created from an object template which was marked
- * as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
- * information.
- */
- bool IsUndetectable() const;
-
- /**
- * Call an Object as a function if a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
- Local<Value> recv,
- int argc,
- Local<Value> argv[]);
-
- /**
- * Call an Object as a constructor if a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- * Note: This method behaves like the Function::NewInstance method.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
- Local<Context> context, int argc, Local<Value> argv[]);
-
- /**
- * Returns the isolate to which the Object belongs.
- */
- Isolate* GetIsolate();
-
- /**
- * If this object is a Set, Map, WeakSet or WeakMap, this returns a
- * representation of the elements of this object as an array.
- * If this object is a SetIterator or MapIterator, this returns all
- * elements of the underlying collection, starting at the iterator's current
- * position.
- * For other types, this will return an empty MaybeLocal<Array> (without
- * scheduling an exception).
- */
- MaybeLocal<Array> PreviewEntries(bool* is_key_value);
-
- static Local<Object> New(Isolate* isolate);
-
- /**
- * Creates a JavaScript object with the given properties, and
- * the given prototype_or_null (which can be any JavaScript
- * value, and if it's null, the newly created object won't have
- * a prototype at all). This is similar to Object.create().
- * All properties will be created as enumerable, configurable
- * and writable properties.
- */
- static Local<Object> New(Isolate* isolate, Local<Value> prototype_or_null,
- Local<Name>* names, Local<Value>* values,
- size_t length);
-
- V8_INLINE static Object* Cast(Value* obj);
-
- /**
- * Support for TC39 "dynamic code brand checks" proposal.
- *
- * This API allows querying whether an object was constructed from a
- * "code like" ObjectTemplate.
- *
- * See also: v8::ObjectTemplate::SetCodeLike
- */
- bool IsCodeLike(Isolate* isolate) const;
-
- private:
- Object();
- static void CheckCast(Value* obj);
- Local<Value> SlowGetInternalField(int index);
- void* SlowGetAlignedPointerFromInternalField(int index);
-};
-
-
-/**
- * An instance of the built-in array constructor (ECMA-262, 15.4.2).
- */
-class V8_EXPORT Array : public Object {
- public:
- uint32_t Length() const;
-
- /**
- * Creates a JavaScript array with the given length. If the length
- * is negative the returned array will have length 0.
- */
- static Local<Array> New(Isolate* isolate, int length = 0);
-
- /**
- * Creates a JavaScript array out of a Local<Value> array in C++
- * with a known length.
- */
- static Local<Array> New(Isolate* isolate, Local<Value>* elements,
- size_t length);
- V8_INLINE static Array* Cast(Value* obj);
-
- private:
- Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in Map constructor (ECMA-262, 6th Edition, 23.1.1).
- */
-class V8_EXPORT Map : public Object {
- public:
- size_t Size() const;
- void Clear();
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT MaybeLocal<Map> Set(Local<Context> context,
- Local<Value> key,
- Local<Value> value);
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- /**
- * Returns an array of length Size() * 2, where index N is the Nth key and
- * index N + 1 is the Nth value.
- */
- Local<Array> AsArray() const;
-
- /**
- * Creates a new empty Map.
- */
- static Local<Map> New(Isolate* isolate);
-
- V8_INLINE static Map* Cast(Value* obj);
-
- private:
- Map();
- static void CheckCast(Value* obj);
-};
-
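Minimal use of the Map API above, assuming an existing `isolate`, `context`, and Local<Value> handles `key` and `value`:

  v8::Local<v8::Map> map = v8::Map::New(isolate);
  map = map->Set(context, key, value).ToLocalChecked();
  bool has = map->Has(context, key).FromMaybe(false);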
-
-/**
- * An instance of the built-in Set constructor (ECMA-262, 6th Edition, 23.2.1).
- */
-class V8_EXPORT Set : public Object {
- public:
- size_t Size() const;
- void Clear();
- V8_WARN_UNUSED_RESULT MaybeLocal<Set> Add(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- /**
- * Returns an array of the keys in this Set.
- */
- Local<Array> AsArray() const;
-
- /**
- * Creates a new empty Set.
- */
- static Local<Set> New(Isolate* isolate);
-
- V8_INLINE static Set* Cast(Value* obj);
-
- private:
- Set();
- static void CheckCast(Value* obj);
-};
-
-
-template<typename T>
-class ReturnValue {
- public:
- template <class S> V8_INLINE ReturnValue(const ReturnValue<S>& that)
- : value_(that.value_) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- // Local setters
- template <typename S>
- V8_INLINE void Set(const Global<S>& handle);
- template <typename S>
- V8_INLINE void Set(const BasicTracedReference<S>& handle);
- template <typename S>
- V8_INLINE void Set(const Local<S> handle);
- // Fast primitive setters
- V8_INLINE void Set(bool value);
- V8_INLINE void Set(double i);
- V8_INLINE void Set(int32_t i);
- V8_INLINE void Set(uint32_t i);
- // Fast JS primitive setters
- V8_INLINE void SetNull();
- V8_INLINE void SetUndefined();
- V8_INLINE void SetEmptyString();
- // Convenience getter for Isolate
- V8_INLINE Isolate* GetIsolate() const;
-
- // Pointer setter: Uncompilable to prevent inadvertent misuse.
- template <typename S>
- V8_INLINE void Set(S* whatever);
-
- // Getter. Creates a new Local<> so it comes with a certain performance
- // hit. If the ReturnValue was not yet set, this will return the undefined
- // value.
- V8_INLINE Local<Value> Get() const;
-
- private:
- template<class F> friend class ReturnValue;
- template<class F> friend class FunctionCallbackInfo;
- template<class F> friend class PropertyCallbackInfo;
- template <class F, class G, class H>
- friend class PersistentValueMapBase;
- V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
- V8_INLINE internal::Address GetDefaultValue();
- V8_INLINE explicit ReturnValue(internal::Address* slot);
- internal::Address* value_;
-};
-
-
-/**
- * The argument information given to function call callbacks. This
- * class provides access to information about the context of the call,
- * including the receiver, the number and values of arguments, and
- * the holder of the function.
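- *
- * A sketch of a typical callback (illustrative only; `SumCallback` is a
- * hypothetical name):
- *
- * \code
- *   void SumCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- *     v8::Isolate* isolate = info.GetIsolate();
- *     v8::Local<v8::Context> context = isolate->GetCurrentContext();
- *     double sum = 0;
- *     for (int i = 0; i < info.Length(); ++i) {
- *       sum += info[i]->NumberValue(context).FromMaybe(0);
- *     }
- *     info.GetReturnValue().Set(sum);
- *   }
- * \endcode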
- */
-template<typename T>
-class FunctionCallbackInfo {
- public:
- /** The number of available arguments. */
- V8_INLINE int Length() const;
- /**
- * Accessor for the available arguments. Returns `undefined` if the index
- * is out of bounds.
- */
- V8_INLINE Local<Value> operator[](int i) const;
- /** Returns the receiver. This corresponds to the "this" value. */
- V8_INLINE Local<Object> This() const;
- /**
- * If the callback was created without a Signature, this is the same
- * value as This(). If there is a signature, and the signature didn't match
- * This() but one of its hidden prototypes, this will be the respective
- * hidden prototype.
- *
- * Note that this is not the prototype of This() on which the accessor
- * referencing this callback was found (which in V8 internally is often
- * referred to as holder [sic]).
- */
- V8_INLINE Local<Object> Holder() const;
- /** For construct calls, this returns the "new.target" value. */
- V8_INLINE Local<Value> NewTarget() const;
- /** Indicates whether this is a regular call or a construct call. */
- V8_INLINE bool IsConstructCall() const;
- /** The data argument specified when creating the callback. */
- V8_INLINE Local<Value> Data() const;
- /** The current Isolate. */
- V8_INLINE Isolate* GetIsolate() const;
- /** The ReturnValue for the call. */
- V8_INLINE ReturnValue<T> GetReturnValue() const;
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
-
- protected:
- friend class internal::FunctionCallbackArguments;
- friend class internal::CustomArguments<FunctionCallbackInfo>;
- friend class debug::ConsoleCallArguments;
- static const int kHolderIndex = 0;
- static const int kIsolateIndex = 1;
- static const int kReturnValueDefaultValueIndex = 2;
- static const int kReturnValueIndex = 3;
- static const int kDataIndex = 4;
- static const int kNewTargetIndex = 5;
-
- V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
- internal::Address* values, int length);
- internal::Address* implicit_args_;
- internal::Address* values_;
- int length_;
-};
-
-
-/**
- * The information passed to a property callback about the context
- * of the property access.
- */
-template<typename T>
-class PropertyCallbackInfo {
- public:
- /**
- * \return The isolate of the property access.
- */
- V8_INLINE Isolate* GetIsolate() const;
-
- /**
- * \return The data set in the configuration, i.e., in
- * `NamedPropertyHandlerConfiguration` or
- * `IndexedPropertyHandlerConfiguration.`
- */
- V8_INLINE Local<Value> Data() const;
-
- /**
- * \return The receiver. In many cases, this is the object on which the
- * property access was intercepted. When using
- * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
- * object passed in as receiver or thisArg.
- *
- * \code
- * void GetterCallback(Local<Name> name,
- * const v8::PropertyCallbackInfo<v8::Value>& info) {
- * auto context = info.GetIsolate()->GetCurrentContext();
- *
- * v8::Local<v8::Value> a_this =
- * info.This()
- * ->GetRealNamedProperty(context, v8_str("a"))
- * .ToLocalChecked();
- * v8::Local<v8::Value> a_holder =
- * info.Holder()
- * ->GetRealNamedProperty(context, v8_str("a"))
- * .ToLocalChecked();
- *
- * CHECK(v8_str("r")->Equals(context, a_this).FromJust());
- * CHECK(v8_str("obj")->Equals(context, a_holder).FromJust());
- *
- * info.GetReturnValue().Set(name);
- * }
- *
- * v8::Local<v8::FunctionTemplate> templ =
- * v8::FunctionTemplate::New(isolate);
- * templ->InstanceTemplate()->SetHandler(
- * v8::NamedPropertyHandlerConfiguration(GetterCallback));
- * LocalContext env;
- * env->Global()
- * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
- * .ToLocalChecked()
- * ->NewInstance(env.local())
- * .ToLocalChecked())
- * .FromJust();
- *
- * CompileRun("obj.a = 'obj'; var r = {a: 'r'}; Reflect.get(obj, 'x', r)");
- * \endcode
- */
- V8_INLINE Local<Object> This() const;
-
- /**
- * \return The object in the prototype chain of the receiver that has the
- * interceptor. Suppose you have `x` and its prototype is `y`, and `y`
- * has an interceptor. Then `info.This()` is `x` and `info.Holder()` is `y`.
- * The Holder() could be a hidden object (the global object, rather
- * than the global proxy).
- *
- * \note For security reasons, do not pass the object back into the runtime.
- */
- V8_INLINE Local<Object> Holder() const;
-
- /**
- * \return The return value of the callback.
- * Can be changed by calling Set().
- * \code
- * info.GetReturnValue().Set(...)
- * \endcode
- *
- */
- V8_INLINE ReturnValue<T> GetReturnValue() const;
-
- /**
- * \return True if the intercepted function should throw if an error occurs.
- * Usually, `true` corresponds to `'use strict'`.
- *
- * \note Always `false` when intercepting `Reflect.set()`
- * independent of the language mode.
- */
- V8_INLINE bool ShouldThrowOnError() const;
-
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 7;
-
- protected:
- friend class MacroAssembler;
- friend class internal::PropertyCallbackArguments;
- friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kShouldThrowOnErrorIndex = 0;
- static const int kHolderIndex = 1;
- static const int kIsolateIndex = 2;
- static const int kReturnValueDefaultValueIndex = 3;
- static const int kReturnValueIndex = 4;
- static const int kDataIndex = 5;
- static const int kThisIndex = 6;
-
- V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
- internal::Address* args_;
-};
-
-using FunctionCallback = void (*)(const FunctionCallbackInfo<Value>& info);
-
-enum class ConstructorBehavior { kThrow, kAllow };
-
-/**
- * A JavaScript function object (ECMA-262, 15.3).
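- *
- * An illustrative sketch of creating and calling a function from C++
- * (assuming a valid `isolate`, a current `context`, and a hypothetical
- * FunctionCallback named `MyCallback`):
- *
- * \code
- *   v8::Local<v8::Function> fn =
- *       v8::Function::New(context, MyCallback).ToLocalChecked();
- *   v8::Local<v8::Value> args[] = {v8::Number::New(isolate, 3),
- *                                  v8::Number::New(isolate, 4)};
- *   v8::Local<v8::Value> result =
- *       fn->Call(context, context->Global(), 2, args).ToLocalChecked();
- * \endcode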
- */
-class V8_EXPORT Function : public Object {
- public:
- /**
- * Create a function in the current execution context
- * for a given FunctionCallback.
- */
- static MaybeLocal<Function> New(
- Local<Context> context, FunctionCallback callback,
- Local<Value> data = Local<Value>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
- Local<Context> context, int argc, Local<Value> argv[]) const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
- Local<Context> context) const {
- return NewInstance(context, 0, nullptr);
- }
-
- /**
- * When side effect checks are enabled, passing kHasNoSideEffect allows the
- * constructor to be invoked without throwing. Calls made within the
- * constructor are still checked.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstanceWithSideEffectType(
- Local<Context> context, int argc, Local<Value> argv[],
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
- Local<Value> recv, int argc,
- Local<Value> argv[]);
-
- void SetName(Local<String> name);
- Local<Value> GetName() const;
-
- /**
- * Name inferred from variable or property assignment of this function.
- * Used to facilitate debugging and profiling of JavaScript code written
- * in an OO style, where many functions are anonymous but are assigned
- * to object properties.
- */
- Local<Value> GetInferredName() const;
-
- /**
- * Returns the displayName if it is set, otherwise the configured name,
- * otherwise the function name, otherwise the inferred name.
- */
- Local<Value> GetDebugName() const;
-
- /**
- * Returns the zero-based line number of the function body, or
- * kLineOffsetNotFound if no information is available.
- */
- int GetScriptLineNumber() const;
- /**
- * Returns the zero-based column number of the function body, or
- * kLineOffsetNotFound if no information is available.
- */
- int GetScriptColumnNumber() const;
-
- /**
- * Returns scriptId.
- */
- int ScriptId() const;
-
- /**
- * Returns the original function if this function is bound, else returns
- * v8::Undefined.
- */
- Local<Value> GetBoundFunction() const;
-
- /**
- * Calls the builtin Function.prototype.toString on this function.
- * This is different from Value::ToString(), which may call a user-defined
- * toString() function, and from Object::ObjectProtoToString(), which
- * always serializes "[object Function]".
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> FunctionProtoToString(
- Local<Context> context);
-
- ScriptOrigin GetScriptOrigin() const;
- V8_INLINE static Function* Cast(Value* obj);
- static const int kLineOffsetNotFound;
-
- private:
- Function();
- static void CheckCast(Value* obj);
-};
-
-#ifndef V8_PROMISE_INTERNAL_FIELD_COUNT
-// The number of required internal fields can be defined by embedder.
-#define V8_PROMISE_INTERNAL_FIELD_COUNT 0
-#endif
-
-/**
- * An instance of the built-in Promise constructor (ES6 draft).
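- *
- * A minimal sketch (assuming a valid `isolate` and a current `context`) of
- * creating and settling a promise from C++ via a Resolver:
- *
- * \code
- *   v8::Local<v8::Promise::Resolver> resolver =
- *       v8::Promise::Resolver::New(context).ToLocalChecked();
- *   v8::Local<v8::Promise> promise = resolver->GetPromise();
- *   resolver->Resolve(context, v8::Number::New(isolate, 42)).Check();
- *   // promise->State() is now v8::Promise::kFulfilled.
- * \endcode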
- */
-class V8_EXPORT Promise : public Object {
- public:
- /**
- * State of the promise. Each value corresponds to one of the possible values
- * of the [[PromiseState]] field.
- */
- enum PromiseState { kPending, kFulfilled, kRejected };
-
- class V8_EXPORT Resolver : public Object {
- public:
- /**
- * Create a new resolver, along with an associated promise in pending state.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
- Local<Context> context);
-
- /**
- * Extract the associated promise.
- */
- Local<Promise> GetPromise();
-
- /**
- * Resolve/reject the associated promise with a given value.
- * Ignored if the promise is no longer pending.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
- Local<Value> value);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
- Local<Value> value);
-
- V8_INLINE static Resolver* Cast(Value* obj);
-
- private:
- Resolver();
- static void CheckCast(Value* obj);
- };
-
- /**
- * Register a resolution/rejection handler with a promise.
- * The handler is given the respective resolution/rejection value as
- * an argument. If the promise is already resolved/rejected, the handler is
- * invoked at the end of turn.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
- Local<Function> handler);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
- Local<Function> handler);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
- Local<Function> on_fulfilled,
- Local<Function> on_rejected);
-
- /**
- * Returns true if the promise has at least one derived promise, and
- * therefore resolve/reject handlers (including the default handler).
- */
- bool HasHandler() const;
-
- /**
- * Returns the content of the [[PromiseResult]] field. The Promise must not
- * be pending.
- */
- Local<Value> Result();
-
- /**
- * Returns the value of the [[PromiseState]] field.
- */
- PromiseState State();
-
- /**
- * Marks this promise as handled to avoid reporting unhandled rejections.
- */
- void MarkAsHandled();
-
- /**
- * Marks this promise as silent to prevent pausing the debugger when the
- * promise is rejected.
- */
- void MarkAsSilent();
-
- V8_INLINE static Promise* Cast(Value* obj);
-
- static const int kEmbedderFieldCount = V8_PROMISE_INTERNAL_FIELD_COUNT;
-
- private:
- Promise();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of a Property Descriptor, see Ecma-262 6.2.4.
- *
- * Properties in a descriptor are present or absent. If you do not set
- * `enumerable`, `configurable`, and `writable`, they are absent. If `value`,
- * `get`, or `set` should be absent but a constructor requires them, pass
- * empty handles.
- *
- * Accessors `get` and `set` must be callable or undefined if they are present.
- *
- * \note Only query properties if they are present, i.e., call `x()` only if
- * `has_x()` returns true.
- *
- * \code
- * // var desc = {writable: false}
- * v8::PropertyDescriptor d(Local<Value>(), false);
- * d.value(); // error, value not set
- * if (d.has_writable()) {
- * d.writable(); // false
- * }
- *
- * // var desc = {value: undefined}
- * v8::PropertyDescriptor d(v8::Undefined(isolate));
- *
- * // var desc = {get: undefined}
- * v8::PropertyDescriptor d(v8::Undefined(isolate), Local<Value>());
- * \endcode
- */
-class V8_EXPORT PropertyDescriptor {
- public:
- // GenericDescriptor
- PropertyDescriptor();
-
- // DataDescriptor
- explicit PropertyDescriptor(Local<Value> value);
-
- // DataDescriptor with writable property
- PropertyDescriptor(Local<Value> value, bool writable);
-
- // AccessorDescriptor
- PropertyDescriptor(Local<Value> get, Local<Value> set);
-
- ~PropertyDescriptor();
-
- Local<Value> value() const;
- bool has_value() const;
-
- Local<Value> get() const;
- bool has_get() const;
- Local<Value> set() const;
- bool has_set() const;
-
- void set_enumerable(bool enumerable);
- bool enumerable() const;
- bool has_enumerable() const;
-
- void set_configurable(bool configurable);
- bool configurable() const;
- bool has_configurable() const;
-
- bool writable() const;
- bool has_writable() const;
-
- struct PrivateData;
- PrivateData* get_private() const { return private_; }
-
- PropertyDescriptor(const PropertyDescriptor&) = delete;
- void operator=(const PropertyDescriptor&) = delete;
-
- private:
- PrivateData* private_;
-};
-
-/**
- * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
- * 26.2.1).
- */
-class V8_EXPORT Proxy : public Object {
- public:
- Local<Value> GetTarget();
- Local<Value> GetHandler();
- bool IsRevoked() const;
- void Revoke();
-
- /**
- * Creates a new Proxy for the target object.
- */
- static MaybeLocal<Proxy> New(Local<Context> context,
- Local<Object> local_target,
- Local<Object> local_handler);
-
- V8_INLINE static Proxy* Cast(Value* obj);
-
- private:
- Proxy();
- static void CheckCast(Value* obj);
-};
-
-/**
- * Points to an unowned contiguous buffer holding a known number of elements.
- *
- * This is similar to std::span (standardized in C++20), but does not
- * require advanced C++ support. In the (far) future, this may be replaced with
- * or aliased to std::span.
- *
- * To facilitate future migration, this class exposes a subset of the interface
- * implemented by std::span.
- */
-template <typename T>
-class V8_EXPORT MemorySpan {
- public:
- /** The default constructor creates an empty span. */
- constexpr MemorySpan() = default;
-
- constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}
-
- /** Returns a pointer to the beginning of the buffer. */
- constexpr T* data() const { return data_; }
- /** Returns the number of elements that the buffer holds. */
- constexpr size_t size() const { return size_; }
-
- private:
- T* data_ = nullptr;
- size_t size_ = 0;
-};
-
-/**
- * An owned byte buffer with associated size.
- */
-struct OwnedBuffer {
- std::unique_ptr<const uint8_t[]> buffer;
- size_t size = 0;
- OwnedBuffer(std::unique_ptr<const uint8_t[]> buffer, size_t size)
- : buffer(std::move(buffer)), size(size) {}
- OwnedBuffer() = default;
-};
-
-// Wrapper around a compiled WebAssembly module, which is potentially shared by
-// different WasmModuleObjects.
-class V8_EXPORT CompiledWasmModule {
- public:
- /**
- * Serialize the compiled module. The serialized data does not include the
- * wire bytes.
- */
- OwnedBuffer Serialize();
-
- /**
- * Get the (wasm-encoded) wire bytes that were used to compile this module.
- */
- MemorySpan<const uint8_t> GetWireBytesRef();
-
- const std::string& source_url() const { return source_url_; }
-
- private:
- friend class WasmModuleObject;
- friend class WasmStreaming;
-
- explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>,
- const char* source_url, size_t url_length);
-
- const std::shared_ptr<internal::wasm::NativeModule> native_module_;
- const std::string source_url_;
-};
-
-// An instance of WebAssembly.Memory.
-class V8_EXPORT WasmMemoryObject : public Object {
- public:
- WasmMemoryObject() = delete;
-
- /**
- * Returns underlying ArrayBuffer.
- */
- Local<ArrayBuffer> Buffer();
-
- V8_INLINE static WasmMemoryObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* object);
-};
-
-// An instance of WebAssembly.Module.
-class V8_EXPORT WasmModuleObject : public Object {
- public:
- WasmModuleObject() = delete;
-
- /**
- * Efficiently re-create a WasmModuleObject, without recompiling, from
- * a CompiledWasmModule.
- */
- static MaybeLocal<WasmModuleObject> FromCompiledModule(
- Isolate* isolate, const CompiledWasmModule&);
-
- /**
- * Get the compiled module for this module object. The compiled module can be
- * shared by several module objects.
- */
- CompiledWasmModule GetCompiledModule();
-
- V8_INLINE static WasmModuleObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * The V8 interface for WebAssembly streaming compilation. When streaming
- * compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
- * such that the embedder can pass the input bytes for streaming compilation to
- * V8.
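- *
- * An illustrative sketch of the embedder side (the names `data`, `chunk`, and
- * `chunk_size` are placeholders; `data` stands for the value V8 hands to the
- * embedder's streaming callback):
- *
- * \code
- *   std::shared_ptr<v8::WasmStreaming> streaming =
- *       v8::WasmStreaming::Unpack(isolate, data);
- *   streaming->OnBytesReceived(chunk, chunk_size);  // repeat per chunk
- *   streaming->Finish();
- * \endcode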
- */
-class V8_EXPORT WasmStreaming final {
- public:
- class WasmStreamingImpl;
-
- /**
- * Client to receive streaming event notifications.
- */
- class Client {
- public:
- virtual ~Client() = default;
- /**
- * Passes the fully compiled module to the client. This can be used to
- * implement code caching.
- */
- virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0;
- };
-
- explicit WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
-
- ~WasmStreaming();
-
- /**
- * Pass a new chunk of bytes to WebAssembly streaming compilation.
- * The buffer passed into {OnBytesReceived} is owned by the caller.
- */
- void OnBytesReceived(const uint8_t* bytes, size_t size);
-
- /**
- * {Finish} should be called after all received bytes were passed to
- * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
- * does not have to be called if {Abort} has already been called.
- */
- void Finish();
-
- /**
- * Abort streaming compilation. If {exception} has a value, then the promise
- * associated with streaming compilation is rejected with that value. If
- * {exception} does not have a value, the promise does not get rejected.
- */
- void Abort(MaybeLocal<Value> exception);
-
- /**
- * Passes previously compiled module bytes. This must be called before
- * {OnBytesReceived}, {Finish}, or {Abort}. Returns true if the module bytes
- * can be used, false otherwise. The buffer passed via {bytes} and {size}
- * is owned by the caller. If {SetCompiledModuleBytes} returns true, the
- * buffer must remain valid until either {Finish} or {Abort} completes.
- */
- bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size);
-
- /**
- * Sets the client object that will receive streaming event notifications.
- * This must be called before {OnBytesReceived}, {Finish}, or {Abort}.
- */
- void SetClient(std::shared_ptr<Client> client);
-
- /**
- * Sets the UTF-8 encoded source URL for the {Script} object. This must be
- * called before {Finish}.
- */
- void SetUrl(const char* url, size_t length);
-
- /**
- * Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
- * Since the embedder is on the other side of the API, it cannot unpack the
- * {Managed} itself.
- */
- static std::shared_ptr<WasmStreaming> Unpack(Isolate* isolate,
- Local<Value> value);
-
- private:
- std::unique_ptr<WasmStreamingImpl> impl_;
-};
-
-// TODO(mtrofin): when streaming compilation is done, we can rename this
-// to simply WasmModuleObjectBuilder
-class V8_EXPORT WasmModuleObjectBuilderStreaming final {
- public:
- explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
- /**
- * The buffer passed into OnBytesReceived is owned by the caller.
- */
- void OnBytesReceived(const uint8_t*, size_t size);
- void Finish();
- /**
- * Abort streaming compilation. If {exception} has a value, then the promise
- * associated with streaming compilation is rejected with that value. If
- * {exception} does not have a value, the promise does not get rejected.
- */
- void Abort(MaybeLocal<Value> exception);
- Local<Promise> GetPromise();
-
- ~WasmModuleObjectBuilderStreaming() = default;
-
- private:
- WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
- delete;
- WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) =
- default;
- WasmModuleObjectBuilderStreaming& operator=(
- const WasmModuleObjectBuilderStreaming&) = delete;
- WasmModuleObjectBuilderStreaming& operator=(
- WasmModuleObjectBuilderStreaming&&) = default;
- Isolate* isolate_ = nullptr;
-
-#if V8_CC_MSVC
- /**
- * We don't need the static Copy API, so the default
- * NonCopyablePersistentTraits would be sufficient; however, MSVC eagerly
- * instantiates the Copy.
- * We ensure we don't use Copy by compiling with the defaults everywhere else.
- */
- Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
-#else
- Persistent<Promise> promise_;
-#endif
- std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
-};
-
-#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
-// The number of required internal fields can be defined by embedder.
-#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
-#endif
-
-
-enum class ArrayBufferCreationMode { kInternalized, kExternalized };
-
-/**
- * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
- * See a document linked in http://crbug.com/v8/9908 for more information.
- *
- * The allocation and destruction of backing stores is generally managed by
- * V8. Clients should always use standard C++ memory ownership types (i.e.
- * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
- * properly, since V8 internal objects may alias backing stores.
- *
- * This object does not keep the underlying |ArrayBuffer::Allocator| alive by
- * default. Use Isolate::CreateParams::array_buffer_allocator_shared when
- * creating the Isolate to make it hold a reference to the allocator itself.
- */
-class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
- public:
- ~BackingStore();
-
- /**
- * Return a pointer to the beginning of the memory block for this backing
- * store. The pointer is only valid as long as this backing store object
- * lives.
- */
- void* Data() const;
-
- /**
- * The length (in bytes) of this backing store.
- */
- size_t ByteLength() const;
-
- /**
- * Indicates whether the backing store was created for an ArrayBuffer or
- * a SharedArrayBuffer.
- */
- bool IsShared() const;
-
- /**
- * Prevent implicit instantiation of operator delete with size_t argument.
- * The size_t argument would be incorrect because ptr points to the
- * internal BackingStore object.
- */
- void operator delete(void* ptr) { ::operator delete(ptr); }
-
- /**
- * Wrapper around ArrayBuffer::Allocator::Reallocate that preserves IsShared.
- * Assumes that the backing_store was allocated by the ArrayBuffer allocator
- * of the given isolate.
- */
- static std::unique_ptr<BackingStore> Reallocate(
- v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
- size_t byte_length);
-
- /**
- * This callback is used only if the memory block for a BackingStore cannot be
- * allocated with an ArrayBuffer::Allocator. In such cases the destructor of
- * the BackingStore invokes the callback to free the memory block.
- */
- using DeleterCallback = void (*)(void* data, size_t length,
- void* deleter_data);
-
- /**
- * If the memory block of a BackingStore is static or is managed manually,
- * then this empty deleter along with nullptr deleter_data can be passed to
- * ArrayBuffer::NewBackingStore to indicate that.
- *
- * The manually managed case should be used with caution and only when it
- * is guaranteed that the memory block is freed only after its ArrayBuffer
- * has been detached.
- */
- static void EmptyDeleter(void* data, size_t length, void* deleter_data);
-
- private:
- /**
- * See [Shared]ArrayBuffer::GetBackingStore and
- * [Shared]ArrayBuffer::NewBackingStore.
- */
- BackingStore();
-};
-
-#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
-// Use v8::BackingStore::DeleterCallback instead.
-using BackingStoreDeleterCallback = void (*)(void* data, size_t length,
- void* deleter_data);
-
-#endif
-
-/**
- * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
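- *
- * A minimal sketch (assuming a valid `isolate`) of allocating a backing store
- * and wrapping it in an ArrayBuffer:
- *
- * \code
- *   std::unique_ptr<v8::BackingStore> store =
- *       v8::ArrayBuffer::NewBackingStore(isolate, 1024);
- *   v8::Local<v8::ArrayBuffer> buffer =
- *       v8::ArrayBuffer::New(isolate, std::move(store));
- *   // buffer->ByteLength() == 1024.
- * \endcode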
- */
-class V8_EXPORT ArrayBuffer : public Object {
- public:
- /**
- * A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
- * The allocator is a global V8 setting. It has to be set via
- * Isolate::CreateParams.
- *
- * Memory allocated through this allocator by V8 is accounted for as external
- * memory by V8. Note that V8 keeps track of the memory for all internalized
- * |ArrayBuffer|s. Responsibility for tracking external memory (using
- * Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
- * embedder upon externalization and taken over upon internalization (creating
- * an internalized buffer from an existing buffer).
- *
- * Note that it is unsafe to call back into V8 from any of the allocator
- * functions.
- */
- class V8_EXPORT Allocator {
- public:
- virtual ~Allocator() = default;
-
- /**
- * Allocate |length| bytes. Return nullptr if allocation is not successful.
- * Memory should be initialized to zeroes.
- */
- virtual void* Allocate(size_t length) = 0;
-
- /**
- * Allocate |length| bytes. Return nullptr if allocation is not successful.
- * Memory does not have to be initialized.
- */
- virtual void* AllocateUninitialized(size_t length) = 0;
-
- /**
- * Free the memory block of size |length|, pointed to by |data|.
- * That memory is guaranteed to be previously allocated by |Allocate|.
- */
- virtual void Free(void* data, size_t length) = 0;
-
- /**
- * Reallocate the memory block of size |old_length| to a memory block of
- * size |new_length| by expanding, contracting, or copying the existing
- * memory block. If |new_length| > |old_length|, then the new part of
- * the memory must be initialized to zeros. Return nullptr if reallocation
- * is not successful.
- *
- * The caller guarantees that the memory block was previously allocated
- * using Allocate or AllocateUninitialized.
- *
- * The default implementation allocates a new block and copies data.
- */
- virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
-
- /**
- * ArrayBuffer allocation mode. kNormal is a malloc/free style allocation,
- * while kReservation is for larger allocations with the ability to set
- * access permissions.
- */
- enum class AllocationMode { kNormal, kReservation };
-
- /**
- * malloc/free based convenience allocator.
- *
- * Caller takes ownership, i.e. the returned object needs to be freed using
- * |delete allocator| once it is no longer in use.
- */
- static Allocator* NewDefaultAllocator();
- };
-
- /**
- * Data length in bytes.
- */
- size_t ByteLength() const;
-
- /**
- * Create a new ArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created ArrayBuffer and
- * will be deallocated when it is garbage-collected,
- * unless the object is externalized.
- */
- static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
-
- /**
- * Create a new ArrayBuffer with an existing backing store.
- * The created array keeps a reference to the backing store until the array
- * is garbage collected. Note that the IsExternal bit does not affect this
- * reference from the array to the backing store.
- *
- * In the future the IsExternal bit will be removed. Until then the bit is set as
- * follows. If the backing store does not own the underlying buffer, then
- * the array is created in externalized state. Otherwise, the array is created
- * in internalized state. In the latter case the array can be transitioned
- * to the externalized state using Externalize(backing_store).
- */
- static Local<ArrayBuffer> New(Isolate* isolate,
- std::shared_ptr<BackingStore> backing_store);
-
- /**
- * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
- * ArrayBuffer::New.
- *
- * If the allocator returns nullptr, then the function may cause GCs in the
- * given isolate and re-try the allocation. If GCs do not help, then the
- * function will crash with an out-of-memory error.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
- /**
- * Returns a new standalone BackingStore that takes over the ownership of
- * the given buffer. The destructor of the BackingStore invokes the given
- * deleter callback.
- *
- * The result can be later passed to ArrayBuffer::New. The raw pointer
- * to the buffer must not be passed again to any V8 API function.
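- *
- * An illustrative sketch of wrapping embedder-owned memory (the function
- * `FreeBuffer` is a hypothetical deleter):
- *
- * \code
- *   static void FreeBuffer(void* data, size_t length, void* deleter_data) {
- *     free(data);
- *   }
- *   void* data = malloc(1024);
- *   std::unique_ptr<v8::BackingStore> store =
- *       v8::ArrayBuffer::NewBackingStore(data, 1024, FreeBuffer, nullptr);
- * \endcode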
- */
- static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
- void* deleter_data);
-
- /**
- * Returns true if this ArrayBuffer may be detached.
- */
- bool IsDetachable() const;
-
- /**
- * Detaches this ArrayBuffer and all its views (typed arrays).
- * Detaching sets the byte length of the buffer and all typed arrays to zero,
- * preventing JavaScript from ever accessing underlying backing store.
- * ArrayBuffer should have been externalized and must be detachable.
- */
- void Detach();
-
- /**
- * Get a shared pointer to the backing store of this array buffer. This
- * pointer coordinates the lifetime management of the internal storage
- * with any live ArrayBuffers on the heap, even across isolates. The embedder
- * should not attempt to manage lifetime of the storage through other means.
- */
- std::shared_ptr<BackingStore> GetBackingStore();
-
- V8_INLINE static ArrayBuffer* Cast(Value* obj);
-
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
-
- private:
- ArrayBuffer();
- static void CheckCast(Value* obj);
-};
-
-
-#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
-// The number of required internal fields can be defined by embedder.
-#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
-#endif
-
-
-/**
- * A base class for an instance of one of "views" over ArrayBuffer,
- * including TypedArrays and DataView (ES6 draft 15.13).
- */
-class V8_EXPORT ArrayBufferView : public Object {
- public:
- /**
- * Returns underlying ArrayBuffer.
- */
- Local<ArrayBuffer> Buffer();
- /**
- * Byte offset in |Buffer|.
- */
- size_t ByteOffset();
- /**
- * Size of a view in bytes.
- */
- size_t ByteLength();
-
- /**
- * Copy the contents of the ArrayBufferView's buffer to an embedder defined
- * memory without additional overhead that calling ArrayBufferView::Buffer
- * might incur.
- *
- * Will write at most min(|byte_length|, ByteLength) bytes starting at
- * ByteOffset of the underlying buffer to the memory starting at |dest|.
- * Returns the number of bytes actually written.
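- *
- * For example (illustrative; `view` is a hypothetical
- * Local<ArrayBufferView>):
- *
- * \code
- *   uint8_t scratch[64];
- *   size_t copied = view->CopyContents(scratch, sizeof(scratch));
- * \endcode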
- */
- size_t CopyContents(void* dest, size_t byte_length);
-
- /**
- * Returns true if ArrayBufferView's backing ArrayBuffer has already been
- * allocated.
- */
- bool HasBuffer() const;
-
- V8_INLINE static ArrayBufferView* Cast(Value* obj);
-
- static const int kInternalFieldCount =
- V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount =
- V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
-
- private:
- ArrayBufferView();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A base class for an instance of TypedArray series of constructors
- * (ES6 draft 15.13.6).
- */
-class V8_EXPORT TypedArray : public ArrayBufferView {
- public:
- /**
- * The largest typed array size that can be constructed using New.
- */
- static constexpr size_t kMaxLength =
- internal::kApiSystemPointerSize == 4
- ? internal::kSmiMaxValue
- : static_cast<size_t>(uint64_t{1} << 32);
-
- /**
- * Number of elements in this typed array
- * (e.g. for Int16Array, |ByteLength|/2).
- */
- size_t Length();
-
- V8_INLINE static TypedArray* Cast(Value* obj);
-
- private:
- TypedArray();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint8Array constructor (ES6 draft 15.13.6).
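- *
- * A minimal sketch (assuming a valid `isolate`) of creating a view over a
- * fresh buffer:
- *
- * \code
- *   v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 16);
- *   v8::Local<v8::Uint8Array> bytes = v8::Uint8Array::New(buffer, 0, 16);
- * \endcode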
- */
-class V8_EXPORT Uint8Array : public TypedArray {
- public:
- static Local<Uint8Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint8Array* Cast(Value* obj);
-
- private:
- Uint8Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint8ClampedArray : public TypedArray {
- public:
- static Local<Uint8ClampedArray> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint8ClampedArray> New(
- Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,
- size_t length);
- V8_INLINE static Uint8ClampedArray* Cast(Value* obj);
-
- private:
- Uint8ClampedArray();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of Int8Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int8Array : public TypedArray {
- public:
- static Local<Int8Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int8Array* Cast(Value* obj);
-
- private:
- Int8Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint16Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint16Array : public TypedArray {
- public:
- static Local<Uint16Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint16Array* Cast(Value* obj);
-
- private:
- Uint16Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Int16Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int16Array : public TypedArray {
- public:
- static Local<Int16Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int16Array* Cast(Value* obj);
-
- private:
- Int16Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint32Array : public TypedArray {
- public:
- static Local<Uint32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint32Array* Cast(Value* obj);
-
- private:
- Uint32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Int32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int32Array : public TypedArray {
- public:
- static Local<Int32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int32Array* Cast(Value* obj);
-
- private:
- Int32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Float32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Float32Array : public TypedArray {
- public:
- static Local<Float32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Float32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Float32Array* Cast(Value* obj);
-
- private:
- Float32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Float64Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Float64Array : public TypedArray {
- public:
- static Local<Float64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Float64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Float64Array* Cast(Value* obj);
-
- private:
- Float64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of BigInt64Array constructor.
- */
-class V8_EXPORT BigInt64Array : public TypedArray {
- public:
- static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static BigInt64Array* Cast(Value* obj);
-
- private:
- BigInt64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of BigUint64Array constructor.
- */
-class V8_EXPORT BigUint64Array : public TypedArray {
- public:
- static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static BigUint64Array* Cast(Value* obj);
-
- private:
- BigUint64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of DataView constructor (ES6 draft 15.13.7).
- */
-class V8_EXPORT DataView : public ArrayBufferView {
- public:
- static Local<DataView> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<DataView> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static DataView* Cast(Value* obj);
-
- private:
- DataView();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in SharedArrayBuffer constructor.
- */
-class V8_EXPORT SharedArrayBuffer : public Object {
- public:
- /**
- * Data length in bytes.
- */
- size_t ByteLength() const;
-
- /**
- * Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created SharedArrayBuffer and
- * will be deallocated when it is garbage-collected,
- * unless the object is externalized.
- */
- static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
-
- /**
- * Create a new SharedArrayBuffer with an existing backing store.
- * The created array keeps a reference to the backing store until the array
- * is garbage collected. Note that the IsExternal bit does not affect this
- * reference from the array to the backing store.
- *
- * In the future the IsExternal bit will be removed. Until then the bit is set as
- * follows. If the backing store does not own the underlying buffer, then
- * the array is created in externalized state. Otherwise, the array is created
- * in internalized state. In the latter case the array can be transitioned
- * to the externalized state using Externalize(backing_store).
- */
- static Local<SharedArrayBuffer> New(
- Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
-
- /**
- * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
- * SharedArrayBuffer::New.
- *
- * If the allocator returns nullptr, then the function may cause GCs in the
- * given isolate and re-try the allocation. If GCs do not help, then the
- * function will crash with an out-of-memory error.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
- /**
- * Returns a new standalone BackingStore that takes over the ownership of
- * the given buffer. The destructor of the BackingStore invokes the given
- * deleter callback.
- *
- * The result can be later passed to SharedArrayBuffer::New. The raw pointer
- * to the buffer must not be passed again to any V8 functions.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
- void* deleter_data);
-
- /**
- * Get a shared pointer to the backing store of this array buffer. This
- * pointer coordinates the lifetime management of the internal storage
- * with any live ArrayBuffers on the heap, even across isolates. The embedder
- * should not attempt to manage lifetime of the storage through other means.
- */
- std::shared_ptr<BackingStore> GetBackingStore();
-
- V8_INLINE static SharedArrayBuffer* Cast(Value* obj);
-
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
-
- private:
- SharedArrayBuffer();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in Date constructor (ECMA-262, 15.9).
- */
-class V8_EXPORT Date : public Object {
- public:
- static V8_WARN_UNUSED_RESULT MaybeLocal<Value> New(Local<Context> context,
- double time);
-
- /**
- * A specialization of Value::NumberValue that is more efficient
- * because we know the structure of this object.
- */
- double ValueOf() const;
-
- V8_INLINE static Date* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A Number object (ECMA-262, 4.3.21).
- */
-class V8_EXPORT NumberObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, double value);
-
- double ValueOf() const;
-
- V8_INLINE static NumberObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A BigInt object (https://tc39.github.io/proposal-bigint)
- */
-class V8_EXPORT BigIntObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, int64_t value);
-
- Local<BigInt> ValueOf() const;
-
- V8_INLINE static BigIntObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A Boolean object (ECMA-262, 4.3.15).
- */
-class V8_EXPORT BooleanObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, bool value);
-
- bool ValueOf() const;
-
- V8_INLINE static BooleanObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A String object (ECMA-262, 4.3.18).
- */
-class V8_EXPORT StringObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, Local<String> value);
-
- Local<String> ValueOf() const;
-
- V8_INLINE static StringObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A Symbol object (ECMA-262 edition 6).
- */
-class V8_EXPORT SymbolObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, Local<Symbol> value);
-
- Local<Symbol> ValueOf() const;
-
- V8_INLINE static SymbolObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
- */
-class V8_EXPORT RegExp : public Object {
- public:
- /**
- * Regular expression flag bits. They can be or'ed to enable a set
- * of flags.
- * The kLinear value ('l') is experimental and can only be used with
- * --enable-experimental-regexp-engine. RegExps with the kLinear flag are
- * guaranteed to be executed in asymptotically linear time with respect to
- * the length of the subject string.
- */
- enum Flags {
- kNone = 0,
- kGlobal = 1 << 0,
- kIgnoreCase = 1 << 1,
- kMultiline = 1 << 2,
- kSticky = 1 << 3,
- kUnicode = 1 << 4,
- kDotAll = 1 << 5,
- kLinear = 1 << 6,
- kHasIndices = 1 << 7,
- };
-
- static constexpr int kFlagCount = 8;
-
- /**
- * Creates a regular expression from the given pattern string and
- * the flags bit field. May throw a JavaScript exception as
- * described in ECMA-262, 15.10.4.1.
- *
- * For example,
- * RegExp::New(v8::String::New("foo"),
- * static_cast<RegExp::Flags>(kGlobal | kMultiline))
- * is equivalent to evaluating "/foo/gm".
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
- Local<String> pattern,
- Flags flags);
-
- /**
- * Like New, but additionally specifies a backtrack limit. If the number of
- * backtracks done in one Exec call hits the limit, a match failure is
- * immediately returned.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> NewWithBacktrackLimit(
- Local<Context> context, Local<String> pattern, Flags flags,
- uint32_t backtrack_limit);
-
- /**
- * Executes the current RegExp instance on the given subject string.
- * Equivalent to RegExp.prototype.exec as described in
- *
- * https://tc39.es/ecma262/#sec-regexp.prototype.exec
- *
- * On success, an Array containing the matched strings is returned. On
- * failure, returns Null.
- *
- * Note: modifies global context state, accessible e.g. through RegExp.input.
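- *
- * An illustrative sketch (assuming a valid `isolate` and a current
- * `context`):
- *
- * \code
- *   v8::Local<v8::RegExp> re =
- *       v8::RegExp::New(context,
- *                       v8::String::NewFromUtf8Literal(isolate, "fo+"),
- *                       v8::RegExp::kNone)
- *           .ToLocalChecked();
- *   v8::Local<v8::Object> match =
- *       re->Exec(context, v8::String::NewFromUtf8Literal(isolate, "foo"))
- *           .ToLocalChecked();
- *   // On success `match` is an Array of matched strings; on failure it is
- *   // Null.
- * \endcode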
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> Exec(Local<Context> context,
- Local<String> subject);
-
- /**
- * Returns the value of the source property: a string representing
- * the regular expression.
- */
- Local<String> GetSource() const;
-
- /**
- * Returns the flags bit field.
- */
- Flags GetFlags() const;
-
- V8_INLINE static RegExp* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A JavaScript value that wraps a C++ void*. This type of value is mainly used
- * to associate C++ data structures with JavaScript objects.
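- *
- * An illustrative sketch (`MyData` is a hypothetical embedder-defined type;
- * the embedder remains responsible for the pointee's lifetime):
- *
- * \code
- *   MyData* payload = new MyData();
- *   v8::Local<v8::External> ext = v8::External::New(isolate, payload);
- *   MyData* back = static_cast<MyData*>(ext->Value());
- * \endcode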
- */
-class V8_EXPORT External : public Value {
- public:
- static Local<External> New(Isolate* isolate, void* value);
- V8_INLINE static External* Cast(Value* obj);
- void* Value() const;
- private:
- static void CheckCast(v8::Value* obj);
-};
-
-#define V8_INTRINSICS_LIST(F) \
- F(ArrayProto_entries, array_entries_iterator) \
- F(ArrayProto_forEach, array_for_each_iterator) \
- F(ArrayProto_keys, array_keys_iterator) \
- F(ArrayProto_values, array_values_iterator) \
- F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
- F(ErrorPrototype, initial_error_prototype) \
- F(IteratorPrototype, initial_iterator_prototype) \
- F(ObjProto_valueOf, object_value_of_function)
-
-enum Intrinsic {
-#define V8_DECL_INTRINSIC(name, iname) k##name,
- V8_INTRINSICS_LIST(V8_DECL_INTRINSIC)
-#undef V8_DECL_INTRINSIC
-};
-
-
-// --- Templates ---
-
-
-/**
- * The superclass of object and function templates.
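- *
- * A minimal sketch (assuming a valid `isolate` and a current `context`) of
- * populating an ObjectTemplate and instantiating it:
- *
- * \code
- *   v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
- *   tmpl->Set(isolate, "answer", v8::Number::New(isolate, 42));
- *   v8::Local<v8::Object> instance =
- *       tmpl->NewInstance(context).ToLocalChecked();
- * \endcode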
- */
-class V8_EXPORT Template : public Data {
- public:
- /**
- * Adds a property to each instance created by this template.
- *
- * The property must be defined either as a primitive value, or a template.
- */
- void Set(Local<Name> name, Local<Data> value,
- PropertyAttribute attributes = None);
- void SetPrivate(Local<Private> name, Local<Data> value,
- PropertyAttribute attributes = None);
- V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value,
- PropertyAttribute attributes = None);
-
- void SetAccessorProperty(
- Local<Name> name,
- Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
- Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
- PropertyAttribute attribute = None,
- AccessControl settings = DEFAULT);
-
- /**
- * Whenever the property with the given name is accessed on objects
- * created from this Template, the getter and setter callbacks
- * are called instead of getting and setting the property directly
- * on the JavaScript object.
- *
- * \param name The name of the property for which an accessor is added.
- * \param getter The callback to invoke when getting the property.
- * \param setter The callback to invoke when setting the property.
- * \param data A piece of data that will be passed to the getter and setter
- * callbacks whenever they are invoked.
- * \param settings Access control settings for the accessor. This is a bit
- * field consisting of one or more of
- * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
- * The default is to not allow cross-context access.
- * ALL_CAN_READ means that all cross-context reads are allowed.
- * ALL_CAN_WRITE means that all cross-context writes are allowed.
- * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
- * cross-context access.
- * \param attribute The attributes of the property for which an accessor
- * is added.
- * \param signature The signature describes valid receivers for the accessor
- * and is used to perform implicit instance checks against them. If the
- * receiver is incompatible (i.e. is not an instance of the constructor as
- * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
- * thrown and no callback is invoked.
- */
- void SetNativeDataProperty(
- Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
- void SetNativeDataProperty(
- Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Like SetNativeDataProperty, but V8 will replace the native data property
- * with a real data property on first access.
- */
- void SetLazyDataProperty(
- Local<Name> name, AccessorNameGetterCallback getter,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * During template instantiation, sets the value with the intrinsic property
- * from the correct context.
- */
- void SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
- PropertyAttribute attribute = None);
-
- private:
- Template();
-
- friend class ObjectTemplate;
- friend class FunctionTemplate;
-};
-
-// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just
-// NamedPropertyFooCallback.
-
-/**
- * Interceptor for get requests on an object.
- *
- * Use `info.GetReturnValue().Set()` to set the return value of the
- * intercepted get request.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \code
- * void GetterCallback(
- * Local<Name> name,
- * const v8::PropertyCallbackInfo<v8::Value>& info) {
- * info.GetReturnValue().Set(v8_num(42));
- * }
- *
- * v8::Local<v8::FunctionTemplate> templ =
- * v8::FunctionTemplate::New(isolate);
- * templ->InstanceTemplate()->SetHandler(
- * v8::NamedPropertyHandlerConfiguration(GetterCallback));
- * LocalContext env;
- * env->Global()
- * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
- * .ToLocalChecked()
- * ->NewInstance(env.local())
- * .ToLocalChecked())
- * .FromJust();
- * v8::Local<v8::Value> result = CompileRun("obj.a = 17; obj.a");
- * CHECK(v8_num(42)->Equals(env.local(), result).FromJust());
- * \endcode
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyGetterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-/**
- * Interceptor for set requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the setter successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)`. If the setter
- * did not intercept the request, i.e., if the request should be handled as
- * if no interceptor is present, do not call `Set()`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param value The value which the property will have if the request
- * is not intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * See also
- * `ObjectTemplate::SetHandler.`
- */
-using GenericNamedPropertySetterCallback =
- void (*)(Local<Name> property, Local<Value> value,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * Intercepts all requests that query the attributes of the
- * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and
- * defineProperty().
- *
- * Use `info.GetReturnValue().Set(value)` to set the property attributes. The
- * value is an integer encoding a `v8::PropertyAttribute`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note Some functions query the property attributes internally, even though
- * they do not return the attributes. For example, `hasOwnProperty()` can
- * trigger this interceptor depending on the state of the object.
- *
- * See also
- * `ObjectTemplate::SetHandler.`
- */
-using GenericNamedPropertyQueryCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Integer>& info);
-
-/**
- * Interceptor for delete requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the deleter successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
- * used as the return value of `delete`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note If you need to mimic the behavior of `delete`, i.e., throw in strict
- * mode instead of returning false, use `info.ShouldThrowOnError()` to determine
- * if you are in strict mode.
- *
- * See also `ObjectTemplate::SetHandler.`
- */
-using GenericNamedPropertyDeleterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
-
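// Editorial sketch (not part of the original header): a deleter interceptor
// matching GenericNamedPropertyDeleterCallback. The property name "pinned" is
// a hypothetical example; `#include "v8.h"` is assumed.
#include "v8.h"

void DeleterInterceptor(v8::Local<v8::Name> property,
                        const v8::PropertyCallbackInfo<v8::Boolean>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  if (property->StrictEquals(
          v8::String::NewFromUtf8Literal(isolate, "pinned"))) {
    // Intercepted: the boolean becomes the result of the `delete` expression.
    info.GetReturnValue().Set(false);
  }
  // Otherwise the request is not intercepted and is handled by V8.
}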
-/**
- * Returns an array containing the names of the properties the named
- * property getter intercepts.
- *
- * Note: The values in the array must be of type v8::Name.
- */
-using GenericNamedPropertyEnumeratorCallback =
- void (*)(const PropertyCallbackInfo<Array>& info);
-
-/**
- * Interceptor for defineProperty requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the definer successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)`. If the definer
- * did not intercept the request, i.e., if the request should be handled as
- * if no interceptor is present, do not call `Set()`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param desc The property descriptor which is used to define the
- * property if the request is not intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyDefinerCallback =
- void (*)(Local<Name> property, const PropertyDescriptor& desc,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * Interceptor for getOwnPropertyDescriptor requests on an object.
- *
- * Use `info.GetReturnValue().Set()` to set the return value of the
- * intercepted request. The return value must be an object that
- * can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from
- * `v8::Object::GetOwnPropertyDescriptor`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note If GetOwnPropertyDescriptor is intercepted, it will
- * always return true, i.e., indicate that the property was found.
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyDescriptorCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyGetterCallback`.
- */
-using IndexedPropertyGetterCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertySetterCallback`.
- */
-using IndexedPropertySetterCallback =
- void (*)(uint32_t index, Local<Value> value,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyQueryCallback`.
- */
-using IndexedPropertyQueryCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
-
-/**
- * See `v8::GenericNamedPropertyDeleterCallback`.
- */
-using IndexedPropertyDeleterCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
-
-/**
- * Returns an array containing the indices of the properties the indexed
- * property getter intercepts.
- *
- * Note: The values in the array must be uint32_t.
- */
-using IndexedPropertyEnumeratorCallback =
- void (*)(const PropertyCallbackInfo<Array>& info);
-
-/**
- * See `v8::GenericNamedPropertyDefinerCallback`.
- */
-using IndexedPropertyDefinerCallback =
- void (*)(uint32_t index, const PropertyDescriptor& desc,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyDescriptorCallback`.
- */
-using IndexedPropertyDescriptorCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
-
-/**
- * Access type specification.
- */
-enum AccessType {
- ACCESS_GET,
- ACCESS_SET,
- ACCESS_HAS,
- ACCESS_DELETE,
- ACCESS_KEYS
-};
-
-
-/**
- * Returns true if the given context should be allowed to access the given
- * object.
- */
-using AccessCheckCallback = bool (*)(Local<Context> accessing_context,
- Local<Object> accessed_object,
- Local<Value> data);
-
-/**
- * A FunctionTemplate is used to create functions at runtime. There
- * can only be one function created from a FunctionTemplate in a
- * context. The lifetime of the created function is equal to the
- * lifetime of the context. So if the embedder needs to create temporary
- * functions that can be collected, using Scripts is preferred.
- *
- * Any modification of a FunctionTemplate after first instantiation will trigger
- * a crash.
- *
- * A FunctionTemplate can have properties; these properties are added to the
- * function object when it is created.
- *
- * A FunctionTemplate has a corresponding instance template which is
- * used to create object instances when the function is used as a
- * constructor. Properties added to the instance template are added to
- * each object instance.
- *
- * A FunctionTemplate can have a prototype template. The prototype template
- * is used to create the prototype object of the function.
- *
- * The following example shows how to use a FunctionTemplate:
- *
- * \code
- * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
- * t->Set(isolate, "func_property", v8::Number::New(isolate, 1));
- *
- * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
- * proto_t->Set(isolate,
- * "proto_method",
- * v8::FunctionTemplate::New(isolate, InvokeCallback));
- * proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
- *
- * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
- * instance_t->SetAccessor(
- * String::NewFromUtf8Literal(isolate, "instance_accessor"),
- * InstanceAccessorCallback);
- * instance_t->SetHandler(
- * NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
- * instance_t->Set(String::NewFromUtf8Literal(isolate, "instance_property"),
- * Number::New(isolate, 3));
- *
- * v8::Local<v8::Function> function = t->GetFunction();
- * v8::Local<v8::Object> instance = function->NewInstance();
- * \endcode
- *
- * Let's use "function" as the JS variable name of the function object
- * and "instance" for the instance object created above. The function
- * and the instance will have the following properties:
- *
- * \code
- * func_property in function == true;
- * function.func_property == 1;
- *
- * function.prototype.proto_method() invokes 'InvokeCallback'
- * function.prototype.proto_const == 2;
- *
- * instance instanceof function == true;
- * instance.instance_accessor calls 'InstanceAccessorCallback'
- * instance.instance_property == 3;
- * \endcode
- *
- * A FunctionTemplate can inherit from another one by calling the
- * FunctionTemplate::Inherit method. The following graph illustrates
- * the semantics of inheritance:
- *
- * \code
- * FunctionTemplate Parent -> Parent() . prototype -> { }
- * ^ ^
- * | Inherit(Parent) | .__proto__
- * | |
- * FunctionTemplate Child -> Child() . prototype -> { }
- * \endcode
- *
- * A FunctionTemplate 'Child' inherits from 'Parent'; the prototype
- * object of the Child() function has __proto__ pointing to the
- * Parent() function's prototype object. An instance of the Child
- * function has all properties on Parent's instance templates.
- *
- * Let Parent be the FunctionTemplate initialized in the previous
- * section and create a Child FunctionTemplate by:
- *
- * \code
- * Local<FunctionTemplate> parent = t;
- * Local<FunctionTemplate> child = FunctionTemplate::New();
- * child->Inherit(parent);
- *
- * Local<Function> child_function = child->GetFunction();
- * Local<Object> child_instance = child_function->NewInstance();
- * \endcode
- *
- * The Child function and Child instance will have the following
- * properties:
- *
- * \code
- * child_func.prototype.__proto__ == function.prototype;
- * child_instance.instance_accessor calls 'InstanceAccessorCallback'
- * child_instance.instance_property == 3;
- * \endcode
- *
- * The additional 'c_function' parameter refers to a fast API call, which
- * must not trigger GC or JavaScript execution, or call into V8 in other
- * ways. For more information on how to define them, see
- * include/v8-fast-api-calls.h. Please note that this feature is still
- * experimental.
- */
-class V8_EXPORT FunctionTemplate : public Template {
- public:
- /** Creates a function template.*/
- static Local<FunctionTemplate> New(
- Isolate* isolate, FunctionCallback callback = nullptr,
- Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const CFunction* c_function = nullptr, uint16_t instance_type = 0,
- uint16_t allowed_receiver_instance_type_range_start = 0,
- uint16_t allowed_receiver_instance_type_range_end = 0);
-
- /** Creates a function template for multiple overloaded fast API calls.*/
- static Local<FunctionTemplate> NewWithCFunctionOverloads(
- Isolate* isolate, FunctionCallback callback = nullptr,
- Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const MemorySpan<const CFunction>& c_function_overloads = {});
-
- /**
- * Creates a function template backed/cached by a private property.
- */
- static Local<FunctionTemplate> NewWithCache(
- Isolate* isolate, FunctionCallback callback,
- Local<Private> cache_property, Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
-
- /** Returns the unique function instance in the current execution context.*/
- V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
- Local<Context> context);
-
- /**
- * Similar to Context::NewRemoteContext, this creates an instance that
- * isn't backed by an actual object.
- *
- * The InstanceTemplate of this FunctionTemplate must have access checks with
- * handlers installed.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewRemoteInstance();
-
- /**
- * Set the call-handler callback for a FunctionTemplate. This
- * callback is called whenever the function created from this
- * FunctionTemplate is called. The 'c_function' represents a fast
- * API call, see the comment above the class declaration.
- */
- void SetCallHandler(
- FunctionCallback callback, Local<Value> data = Local<Value>(),
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const MemorySpan<const CFunction>& c_function_overloads = {});
-
- /** Set the predefined length property for the FunctionTemplate. */
- void SetLength(int length);
-
- /** Get the InstanceTemplate. */
- Local<ObjectTemplate> InstanceTemplate();
-
- /**
- * Causes the function template to inherit from a parent function template.
- * This means the function's prototype.__proto__ is set to the parent
- * function's prototype.
- **/
- void Inherit(Local<FunctionTemplate> parent);
-
- /**
- * A PrototypeTemplate is the template used to create the prototype object
- * of the function created by this template.
- */
- Local<ObjectTemplate> PrototypeTemplate();
-
- /**
- * A PrototypeProviderTemplate is another function template whose prototype
- * property is used for this template. This is mutually exclusive with setting
- * a prototype template indirectly by calling PrototypeTemplate() or using
- * Inherit().
- **/
- void SetPrototypeProviderTemplate(Local<FunctionTemplate> prototype_provider);
-
- /**
- * Set the class name of the FunctionTemplate. This is used for
- * printing objects created with the function created from the
- * FunctionTemplate as its constructor.
- */
- void SetClassName(Local<String> name);
-
-
- /**
- * When set to true, no access check will be performed on the receiver of a
- * function call. Currently defaults to true, but this is subject to change.
- */
- void SetAcceptAnyReceiver(bool value);
-
- /**
- * Sets the ReadOnly flag in the attributes of the 'prototype' property
- * of functions created from this FunctionTemplate to true.
- */
- void ReadOnlyPrototype();
-
- /**
- * Removes the prototype property from functions created from this
- * FunctionTemplate.
- */
- void RemovePrototype();
-
- /**
- * Returns true if the given object is an instance of this function
- * template.
- */
- bool HasInstance(Local<Value> object);
-
- /**
- * Returns true if the given value is an API object that was constructed by an
- * instance of this function template (without checking for inheriting
- * function templates).
- *
- * This is an experimental feature and may still change significantly.
- */
- bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
-
- V8_INLINE static FunctionTemplate* Cast(Data* data);
-
- private:
- FunctionTemplate();
-
- static void CheckCast(Data* that);
- friend class Context;
- friend class ObjectTemplate;
-};
-
-/**
- * Configuration flags for v8::NamedPropertyHandlerConfiguration or
- * v8::IndexedPropertyHandlerConfiguration.
- */
-enum class PropertyHandlerFlags {
- /**
- * None.
- */
- kNone = 0,
-
- /**
- * See ALL_CAN_READ above.
- */
- kAllCanRead = 1,
-
- /**
- * Will not call into interceptor for properties on the receiver or prototype
- * chain, i.e., only call into interceptor for properties that do not exist.
- * Currently only valid for named interceptors.
- */
- kNonMasking = 1 << 1,
-
- /**
- * Will not call into interceptor for symbol lookup. Only meaningful for
- * named interceptors.
- */
- kOnlyInterceptStrings = 1 << 2,
-
- /**
- * The getter, query, enumerator callbacks do not produce side effects.
- */
- kHasNoSideEffect = 1 << 3,
-};
-
-struct NamedPropertyHandlerConfiguration {
- NamedPropertyHandlerConfiguration(
- GenericNamedPropertyGetterCallback getter,
- GenericNamedPropertySetterCallback setter,
- GenericNamedPropertyQueryCallback query,
- GenericNamedPropertyDeleterCallback deleter,
- GenericNamedPropertyEnumeratorCallback enumerator,
- GenericNamedPropertyDefinerCallback definer,
- GenericNamedPropertyDescriptorCallback descriptor,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- NamedPropertyHandlerConfiguration(
- /** Note: getter is required */
- GenericNamedPropertyGetterCallback getter = nullptr,
- GenericNamedPropertySetterCallback setter = nullptr,
- GenericNamedPropertyQueryCallback query = nullptr,
- GenericNamedPropertyDeleterCallback deleter = nullptr,
- GenericNamedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(nullptr),
- descriptor(nullptr),
- data(data),
- flags(flags) {}
-
- NamedPropertyHandlerConfiguration(
- GenericNamedPropertyGetterCallback getter,
- GenericNamedPropertySetterCallback setter,
- GenericNamedPropertyDescriptorCallback descriptor,
- GenericNamedPropertyDeleterCallback deleter,
- GenericNamedPropertyEnumeratorCallback enumerator,
- GenericNamedPropertyDefinerCallback definer,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(nullptr),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- GenericNamedPropertyGetterCallback getter;
- GenericNamedPropertySetterCallback setter;
- GenericNamedPropertyQueryCallback query;
- GenericNamedPropertyDeleterCallback deleter;
- GenericNamedPropertyEnumeratorCallback enumerator;
- GenericNamedPropertyDefinerCallback definer;
- GenericNamedPropertyDescriptorCallback descriptor;
- Local<Value> data;
- PropertyHandlerFlags flags;
-};
-
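// Editorial sketch (not part of the original header): wiring a
// NamedPropertyHandlerConfiguration into an ObjectTemplate. `isolate` is
// assumed to be a live v8::Isolate*; GetterInterceptor is an assumed callback
// with the getter signature, and SetterInterceptor is the sketch shown above.
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
    GetterInterceptor, SetterInterceptor, /*query=*/nullptr,
    /*deleter=*/nullptr, /*enumerator=*/nullptr,
    /*data=*/v8::Local<v8::Value>(), v8::PropertyHandlerFlags::kNone));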
-
-struct IndexedPropertyHandlerConfiguration {
- IndexedPropertyHandlerConfiguration(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback deleter,
- IndexedPropertyEnumeratorCallback enumerator,
- IndexedPropertyDefinerCallback definer,
- IndexedPropertyDescriptorCallback descriptor,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- IndexedPropertyHandlerConfiguration(
- /** Note: getter is required */
- IndexedPropertyGetterCallback getter = nullptr,
- IndexedPropertySetterCallback setter = nullptr,
- IndexedPropertyQueryCallback query = nullptr,
- IndexedPropertyDeleterCallback deleter = nullptr,
- IndexedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(nullptr),
- descriptor(nullptr),
- data(data),
- flags(flags) {}
-
- IndexedPropertyHandlerConfiguration(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyDescriptorCallback descriptor,
- IndexedPropertyDeleterCallback deleter,
- IndexedPropertyEnumeratorCallback enumerator,
- IndexedPropertyDefinerCallback definer,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(nullptr),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- IndexedPropertyGetterCallback getter;
- IndexedPropertySetterCallback setter;
- IndexedPropertyQueryCallback query;
- IndexedPropertyDeleterCallback deleter;
- IndexedPropertyEnumeratorCallback enumerator;
- IndexedPropertyDefinerCallback definer;
- IndexedPropertyDescriptorCallback descriptor;
- Local<Value> data;
- PropertyHandlerFlags flags;
-};
-
-
-/**
- * An ObjectTemplate is used to create objects at runtime.
- *
- * Properties added to an ObjectTemplate are added to each object
- * created from the ObjectTemplate.
- */
-class V8_EXPORT ObjectTemplate : public Template {
- public:
- /** Creates an ObjectTemplate. */
- static Local<ObjectTemplate> New(
- Isolate* isolate,
- Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
-
- /** Creates a new instance of this template.*/
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
-
- /**
- * Sets an accessor on the object template.
- *
- * Whenever the property with the given name is accessed on objects
- * created from this ObjectTemplate the getter and setter callbacks
- * are called instead of getting and setting the property directly
- * on the JavaScript object.
- *
- * \param name The name of the property for which an accessor is added.
- * \param getter The callback to invoke when getting the property.
- * \param setter The callback to invoke when setting the property.
- * \param data A piece of data that will be passed to the getter and setter
- * callbacks whenever they are invoked.
- * \param settings Access control settings for the accessor. This is a bit
- * field consisting of one or more of
- * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
- * The default is to not allow cross-context access.
- * ALL_CAN_READ means that all cross-context reads are allowed.
- * ALL_CAN_WRITE means that all cross-context writes are allowed.
- * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
- * cross-context access.
- * \param attribute The attributes of the property for which an accessor
- * is added.
- * \param signature The signature describes valid receivers for the accessor
- * and is used to perform implicit instance checks against them. If the
- * receiver is incompatible (i.e. is not an instance of the constructor as
- * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
- * thrown and no callback is invoked.
- */
- void SetAccessor(
- Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
- void SetAccessor(
- Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Sets a named property handler on the object template.
- *
- * Whenever a property whose name is a string or a symbol is accessed on
- * objects created from this object template, the provided callback is
- * invoked instead of accessing the property directly on the JavaScript
- * object.
- *
- * @param configuration The NamedPropertyHandlerConfiguration that defines the
- * callbacks to invoke when accessing a property.
- */
- void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
-
- /**
- * Sets an indexed property handler on the object template.
- *
- * Whenever an indexed property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * \param getter The callback to invoke when getting a property.
- * \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check if an object has a property.
- * \param deleter The callback to invoke when deleting a property.
- * \param enumerator The callback to invoke to enumerate all the indexed
- * properties of an object.
- * \param data A piece of data that will be passed to the callbacks
- * whenever they are invoked.
- */
- // TODO(dcarney): deprecate
- void SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter = nullptr,
- IndexedPropertyQueryCallback query = nullptr,
- IndexedPropertyDeleterCallback deleter = nullptr,
- IndexedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>()) {
- SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
- deleter, enumerator, data));
- }
-
- /**
- * Sets an indexed property handler on the object template.
- *
- * Whenever an indexed property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * @param configuration The IndexedPropertyHandlerConfiguration that defines
- * the callbacks to invoke when accessing a property.
- */
- void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
-
- /**
- * Sets the callback to be used when calling instances created from
- * this template as a function. If no callback is set, instances
- * behave like normal JavaScript objects that cannot be called as a
- * function.
- */
- void SetCallAsFunctionHandler(FunctionCallback callback,
- Local<Value> data = Local<Value>());
-
- /**
- * Mark object instances of the template as undetectable.
- *
- * In many ways, undetectable objects behave as though they are not
- * there. They behave like 'undefined' in conditionals and when
- * printed. However, properties can be accessed and called as on
- * normal objects.
- */
- void MarkAsUndetectable();
-
- /**
- * Sets an access check callback on the object template and enables access
- * checks.
- *
- * When accessing properties on instances of this object template,
- * the access check callback will be called to determine whether or
- * not to allow cross-context access to the properties.
- */
- void SetAccessCheckCallback(AccessCheckCallback callback,
- Local<Value> data = Local<Value>());
-
- /**
- * Like SetAccessCheckCallback but invokes an interceptor on failed access
- * checks instead of looking up all-can-read properties. You can only use
- * either this method or SetAccessCheckCallback, but not both at the same
- * time.
- */
- void SetAccessCheckCallbackAndHandler(
- AccessCheckCallback callback,
- const NamedPropertyHandlerConfiguration& named_handler,
- const IndexedPropertyHandlerConfiguration& indexed_handler,
- Local<Value> data = Local<Value>());
-
- /**
- * Gets the number of internal fields for objects generated from
- * this template.
- */
- int InternalFieldCount() const;
-
- /**
- * Sets the number of internal fields for objects generated from
- * this template.
- */
- void SetInternalFieldCount(int value);
-
- /**
- * Returns true if the object will be an immutable prototype exotic object.
- */
- bool IsImmutableProto() const;
-
- /**
- * Makes objects created from this ObjectTemplate immutable prototype exotic
- * objects, i.e., objects with an immutable __proto__.
- */
- void SetImmutableProto();
-
- /**
- * Support for TC39 "dynamic code brand checks" proposal.
- *
- * This API allows marking (and querying) objects as "code like", which
- * causes them to be treated like Strings in the context of eval and the
- * Function constructor.
- *
- * Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
- */
- void SetCodeLike();
- bool IsCodeLike() const;
-
- V8_INLINE static ObjectTemplate* Cast(Data* data);
-
- private:
- ObjectTemplate();
- static Local<ObjectTemplate> New(internal::Isolate* isolate,
- Local<FunctionTemplate> constructor);
- static void CheckCast(Data* that);
- friend class FunctionTemplate;
-};
-
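// Editorial sketch (not part of the original header): creating an object from
// an ObjectTemplate. `isolate` and `context` are assumed to exist and an
// active handle scope is assumed.
v8::Local<v8::ObjectTemplate> obj_templ = v8::ObjectTemplate::New(isolate);
obj_templ->SetInternalFieldCount(1);
v8::Local<v8::Object> obj =
    obj_templ->NewInstance(context).ToLocalChecked();
// Stash embedder data in the reserved internal field.
obj->SetInternalField(0, v8::External::New(isolate, /*value=*/nullptr));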
-/**
- * A Signature specifies which receiver is valid for a function.
- *
- * A receiver matches a given signature if the receiver (or any of its
- * hidden prototypes) was created from the signature's FunctionTemplate, or
- * from a FunctionTemplate that inherits directly or indirectly from the
- * signature's FunctionTemplate.
- */
-class V8_EXPORT Signature : public Data {
- public:
- static Local<Signature> New(
- Isolate* isolate,
- Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
-
- V8_INLINE static Signature* Cast(Data* data);
-
- private:
- Signature();
-
- static void CheckCast(Data* that);
-};
-
-
-/**
- * An AccessorSignature specifies which receivers are valid parameters
- * to an accessor callback.
- */
-class V8_EXPORT AccessorSignature : public Data {
- public:
- static Local<AccessorSignature> New(
- Isolate* isolate,
- Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
-
- V8_INLINE static AccessorSignature* Cast(Data* data);
-
- private:
- AccessorSignature();
-
- static void CheckCast(Data* that);
-};
-
-
-// --- Extensions ---
-
-/**
- * Ignore
- */
-class V8_EXPORT Extension {
- public:
- // Note that the strings passed into this constructor must live as long
- // as the Extension itself.
- Extension(const char* name, const char* source = nullptr, int dep_count = 0,
- const char** deps = nullptr, int source_length = -1);
- virtual ~Extension() { delete source_; }
- virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
- Isolate* isolate, Local<String> name) {
- return Local<FunctionTemplate>();
- }
-
- const char* name() const { return name_; }
- size_t source_length() const { return source_length_; }
- const String::ExternalOneByteStringResource* source() const {
- return source_;
- }
- int dependency_count() const { return dep_count_; }
- const char** dependencies() const { return deps_; }
- void set_auto_enable(bool value) { auto_enable_ = value; }
- bool auto_enable() { return auto_enable_; }
-
- // Disallow copying and assigning.
- Extension(const Extension&) = delete;
- void operator=(const Extension&) = delete;
-
- private:
- const char* name_;
- size_t source_length_; // expected to initialize before source_
- String::ExternalOneByteStringResource* source_;
- int dep_count_;
- const char** deps_;
- bool auto_enable_;
-};
-
-void V8_EXPORT RegisterExtension(std::unique_ptr<Extension>);
-
-// --- Statics ---
-
-V8_INLINE Local<Primitive> Undefined(Isolate* isolate);
-V8_INLINE Local<Primitive> Null(Isolate* isolate);
-V8_INLINE Local<Boolean> True(Isolate* isolate);
-V8_INLINE Local<Boolean> False(Isolate* isolate);
-
-/**
- * A set of constraints that specifies the limits of the runtime's memory use.
- * You must set the heap size before initializing the VM - the size cannot be
- * adjusted after the VM is initialized.
- *
- * If you are using threads then you should hold the V8::Locker lock while
- * setting the stack limit and you must set a non-default stack limit separately
- * for each thread.
- *
- * The arguments of the *_in_bytes setters below specify sizes in bytes.
- */
-class V8_EXPORT ResourceConstraints {
- public:
- /**
- * Configures the constraints with reasonable default values based on the
- * provided heap size limit. The heap size includes both the young and
- * the old generation.
- *
- * \param initial_heap_size_in_bytes The initial heap size or zero.
- * By default V8 starts with a small heap and dynamically grows it to
- * match the set of live objects. This may lead to ineffective
- * garbage collections at startup if the live set is large.
- * Setting the initial heap size avoids such garbage collections.
- * Note that this does not affect young generation garbage collections.
- *
- * \param maximum_heap_size_in_bytes The hard limit for the heap size.
- * When the heap size approaches this limit, V8 will perform a series of
- * garbage collections and invoke the NearHeapLimitCallback. If the garbage
- * collections do not help and the callback does not increase the limit,
- * then V8 will crash with V8::FatalProcessOutOfMemory.
- */
- void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
- size_t maximum_heap_size_in_bytes);
-
- /**
- * Configures the constraints with reasonable default values based on the
- * capabilities of the current device the VM is running on.
- *
- * \param physical_memory The total amount of physical memory on the current
- * device, in bytes.
- * \param virtual_memory_limit The amount of virtual memory on the current
- * device, in bytes, or zero, if there is no limit.
- */
- void ConfigureDefaults(uint64_t physical_memory,
- uint64_t virtual_memory_limit);
-
- /**
- * The address beyond which the VM's stack may not grow.
- */
- uint32_t* stack_limit() const { return stack_limit_; }
- void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
-
- /**
- * The amount of virtual memory reserved for generated code. This is relevant
- * for 64-bit architectures that rely on code range for calls in code.
- *
- * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
- * process-wide code range that is lazily initialized. This value is used to
- * configure that shared code range when the first Isolate is
- * created. Subsequent Isolates ignore this value.
- */
- size_t code_range_size_in_bytes() const { return code_range_size_; }
- void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
-
- /**
- * The maximum size of the old generation.
- * When the old generation approaches this limit, V8 will perform a series of
- * garbage collections and invoke the NearHeapLimitCallback.
- * If the garbage collections do not help and the callback does not
- * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
- */
- size_t max_old_generation_size_in_bytes() const {
- return max_old_generation_size_;
- }
- void set_max_old_generation_size_in_bytes(size_t limit) {
- max_old_generation_size_ = limit;
- }
-
- /**
- * The maximum size of the young generation, which consists of two semi-spaces
- * and a large object space. This affects the frequency of Scavenge garbage
- * collections and should typically be much smaller than the old generation.
- */
- size_t max_young_generation_size_in_bytes() const {
- return max_young_generation_size_;
- }
- void set_max_young_generation_size_in_bytes(size_t limit) {
- max_young_generation_size_ = limit;
- }
-
- size_t initial_old_generation_size_in_bytes() const {
- return initial_old_generation_size_;
- }
- void set_initial_old_generation_size_in_bytes(size_t initial_size) {
- initial_old_generation_size_ = initial_size;
- }
-
- size_t initial_young_generation_size_in_bytes() const {
- return initial_young_generation_size_;
- }
- void set_initial_young_generation_size_in_bytes(size_t initial_size) {
- initial_young_generation_size_ = initial_size;
- }
-
- private:
- static constexpr size_t kMB = 1048576u;
- size_t code_range_size_ = 0;
- size_t max_old_generation_size_ = 0;
- size_t max_young_generation_size_ = 0;
- size_t initial_old_generation_size_ = 0;
- size_t initial_young_generation_size_ = 0;
- uint32_t* stack_limit_ = nullptr;
-};
-
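// Editorial sketch (not part of the original header): configuring heap limits
// before creating an isolate. Passing the constraints through the
// `constraints` member of v8::Isolate::CreateParams is assumed to be the
// embedder's chosen mechanism.
v8::Isolate::CreateParams params;
params.array_buffer_allocator =
    v8::ArrayBuffer::Allocator::NewDefaultAllocator();
params.constraints.ConfigureDefaultsFromHeapSize(
    /*initial_heap_size_in_bytes=*/0,
    /*maximum_heap_size_in_bytes=*/static_cast<size_t>(512) * 1024 * 1024);
v8::Isolate* isolate = v8::Isolate::New(params);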
-
-// --- Exceptions ---
-
-using FatalErrorCallback = void (*)(const char* location, const char* message);
-
-using OOMErrorCallback = void (*)(const char* location, bool is_heap_oom);
-
-using DcheckErrorCallback = void (*)(const char* file, int line,
- const char* message);
-
-using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
-
-// --- Tracing ---
-
-enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
-using LogEventCallback = void (*)(const char* name,
- int /* LogEventStatus */ status);
-
-/**
- * Create new error objects by calling the corresponding error object
- * constructor with the message.
- */
-class V8_EXPORT Exception {
- public:
- static Local<Value> RangeError(Local<String> message);
- static Local<Value> ReferenceError(Local<String> message);
- static Local<Value> SyntaxError(Local<String> message);
- static Local<Value> TypeError(Local<String> message);
- static Local<Value> WasmCompileError(Local<String> message);
- static Local<Value> WasmLinkError(Local<String> message);
- static Local<Value> WasmRuntimeError(Local<String> message);
- static Local<Value> Error(Local<String> message);
-
- /**
- * Creates an error message for the given exception.
- * Will try to reconstruct the original stack trace from the exception value,
- * or capture the current stack trace if not available.
- */
- static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
-
- /**
- * Returns the original stack trace that was captured at the creation time
- * of a given exception, or an empty handle if not available.
- */
- static Local<StackTrace> GetStackTrace(Local<Value> exception);
-};
-
-
-// --- Counters Callbacks ---
-
-using CounterLookupCallback = int* (*)(const char* name);
-
-using CreateHistogramCallback = void* (*)(const char* name, int min, int max,
- size_t buckets);
-
-using AddHistogramSampleCallback = void (*)(void* histogram, int sample);
-
-// --- Crashkeys Callback ---
-enum class CrashKeyId {
- kIsolateAddress,
- kReadonlySpaceFirstPageAddress,
- kMapSpaceFirstPageAddress,
- kCodeSpaceFirstPageAddress,
- kDumpType,
-};
-
-using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value);
-
-// --- Enter/Leave Script Callback ---
-using BeforeCallEnteredCallback = void (*)(Isolate*);
-using CallCompletedCallback = void (*)(Isolate*);
-
-/**
- * HostImportModuleDynamicallyCallback is called when we require the
- * embedder to load a module. This is used as part of the dynamic
- * import syntax.
- *
- * The referrer contains metadata about the script/module that calls
- * import.
- *
- * The specifier is the name of the module that should be imported.
- *
- * The embedder must compile, instantiate, and evaluate the Module, and
- * obtain its namespace object.
- *
- * The Promise returned from this function is forwarded to userland
- * JavaScript. The embedder must resolve this promise with the module
- * namespace object. In case of an exception, the embedder must reject
- * this promise with the exception. If the promise creation itself
- * fails (e.g. due to stack overflow), the embedder must propagate
- * that exception by returning an empty MaybeLocal.
- */
-using HostImportModuleDynamicallyCallback V8_DEPRECATED(
- "Use HostImportModuleDynamicallyWithImportAssertionsCallback instead") =
- MaybeLocal<Promise> (*)(Local<Context> context,
- Local<ScriptOrModule> referrer,
- Local<String> specifier);
-
-/**
- * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
- * require the embedder to load a module. This is used as part of the dynamic
- * import syntax.
- *
- * The referrer contains metadata about the script/module that calls
- * import.
- *
- * The specifier is the name of the module that should be imported.
- *
- * The import_assertions are import assertions for this request in the form:
- * [key1, value1, key2, value2, ...] where the keys and values are of type
- * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
- * returned from ModuleRequest::GetImportAssertions(), this array does not
- * contain the source Locations of the assertions.
- *
- * The embedder must compile, instantiate, and evaluate the Module, and
- * obtain its namespace object.
- *
- * The Promise returned from this function is forwarded to userland
- * JavaScript. The embedder must resolve this promise with the module
- * namespace object. In case of an exception, the embedder must reject
- * this promise with the exception. If the promise creation itself
- * fails (e.g. due to stack overflow), the embedder must propagate
- * that exception by returning an empty MaybeLocal.
- */
-using HostImportModuleDynamicallyWithImportAssertionsCallback =
- MaybeLocal<Promise> (*)(Local<Context> context,
- Local<ScriptOrModule> referrer,
- Local<String> specifier,
- Local<FixedArray> import_assertions);
-
-/**
- * HostInitializeImportMetaObjectCallback is called the first time import.meta
- * is accessed for a module. Subsequent access will reuse the same value.
- *
- * The method combines two implementation-defined abstract operations into one:
- * HostGetImportMetaProperties and HostFinalizeImportMeta.
- *
- * The embedder should use v8::Object::CreateDataProperty to add properties on
- * the meta object.
- */
-using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context,
- Local<Module> module,
- Local<Object> meta);
-
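// Editorial sketch (not part of the original header): an import.meta callback
// matching HostInitializeImportMetaObjectCallback. The "url" property and its
// value are hypothetical; registration through
// Isolate::SetHostInitializeImportMetaObjectCallback is assumed.
#include "v8.h"

void InitializeImportMeta(v8::Local<v8::Context> context,
                          v8::Local<v8::Module> module,
                          v8::Local<v8::Object> meta) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::String> key = v8::String::NewFromUtf8Literal(isolate, "url");
  v8::Local<v8::String> url =
      v8::String::NewFromUtf8Literal(isolate, "file:///example.mjs");
  meta->CreateDataProperty(context, key, url).Check();
}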
-/**
- * PrepareStackTraceCallback is called when the stack property of an error is
- * first accessed. The return value will be used as the stack value. If this
- * callback is registered, the |Error.prepareStackTrace| API will be disabled.
- * |sites| is an array of call sites, specified in
- * https://v8.dev/docs/stack-trace-api
- */
-using PrepareStackTraceCallback = MaybeLocal<Value> (*)(Local<Context> context,
- Local<Value> error,
- Local<Array> sites);
-
-/**
- * PromiseHook with type kInit is called when a new promise is
- * created. When a new promise is created as part of the chain in the
- * case of Promise.then or in the intermediate promises created by
- * Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
- * otherwise we pass undefined.
- *
- * PromiseHook with type kResolve is called at the beginning of
- * resolve or reject function defined by CreateResolvingFunctions.
- *
- * PromiseHook with type kBefore is called at the beginning of the
- * PromiseReactionJob.
- *
- * PromiseHook with type kAfter is called right at the end of the
- * PromiseReactionJob.
- */
-enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };
-
-using PromiseHook = void (*)(PromiseHookType type, Local<Promise> promise,
- Local<Value> parent);
-
-// --- Promise Reject Callback ---
-enum PromiseRejectEvent {
- kPromiseRejectWithNoHandler = 0,
- kPromiseHandlerAddedAfterReject = 1,
- kPromiseRejectAfterResolved = 2,
- kPromiseResolveAfterResolved = 3,
-};
-
-class PromiseRejectMessage {
- public:
- PromiseRejectMessage(Local<Promise> promise, PromiseRejectEvent event,
- Local<Value> value)
- : promise_(promise), event_(event), value_(value) {}
-
- V8_INLINE Local<Promise> GetPromise() const { return promise_; }
- V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
- V8_INLINE Local<Value> GetValue() const { return value_; }
-
- private:
- Local<Promise> promise_;
- PromiseRejectEvent event_;
- Local<Value> value_;
-};
-
-using PromiseRejectCallback = void (*)(PromiseRejectMessage message);
-
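// Editorial sketch (not part of the original header): a promise reject
// callback. Registration through Isolate::SetPromiseRejectCallback is assumed.
#include "v8.h"

void OnPromiseReject(v8::PromiseRejectMessage message) {
  if (message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
    v8::Local<v8::Promise> promise = message.GetPromise();
    v8::Local<v8::Value> reason = message.GetValue();
    // Report the unhandled rejection via the embedder's own logging here.
    (void)promise;
    (void)reason;
  }
}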
-// --- Microtasks Callbacks ---
-using MicrotasksCompletedCallbackWithData = void (*)(Isolate*, void*);
-using MicrotaskCallback = void (*)(void* data);
-
-/**
- * Policy for running microtasks:
- * - explicit: microtasks are invoked with the
- * Isolate::PerformMicrotaskCheckpoint() method;
- * - scoped: microtasks invocation is controlled by MicrotasksScope objects;
- * - auto: microtasks are invoked when the script call depth decrements
- * to zero.
- */
-enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
-
-/**
- * Represents the microtask queue, where microtasks are stored and processed.
- * https://html.spec.whatwg.org/multipage/webappapis.html#microtask-queue
- * https://html.spec.whatwg.org/multipage/webappapis.html#enqueuejob(queuename,-job,-arguments)
- * https://html.spec.whatwg.org/multipage/webappapis.html#perform-a-microtask-checkpoint
- *
- * A MicrotaskQueue instance may be associated with multiple Contexts by passing
- * it to Context::New(), and they can be detached by Context::DetachGlobal().
- * The embedder must keep the MicrotaskQueue instance alive until all associated
- * Contexts are gone or detached.
- *
- * Use the same instance of MicrotaskQueue for all Contexts that may access each
- * other synchronously. E.g. for Web embedding, use the same instance for all
- * origins that share the same URL scheme and eTLD+1.
- */
-class V8_EXPORT MicrotaskQueue {
- public:
- /**
- * Creates an empty MicrotaskQueue instance.
- */
- static std::unique_ptr<MicrotaskQueue> New(
- Isolate* isolate, MicrotasksPolicy policy = MicrotasksPolicy::kAuto);
-
- virtual ~MicrotaskQueue() = default;
-
- /**
- * Enqueues the callback to the queue.
- */
- virtual void EnqueueMicrotask(Isolate* isolate,
- Local<Function> microtask) = 0;
-
- /**
- * Enqueues the callback to the queue.
- */
- virtual void EnqueueMicrotask(v8::Isolate* isolate,
- MicrotaskCallback callback,
- void* data = nullptr) = 0;
-
- /**
- * Adds a callback to notify the embedder after microtasks were run. The
- * callback is triggered by explicit RunMicrotasks call or automatic
- * microtasks execution (see Isolate::SetMicrotasksPolicy).
- *
- * The callback will be triggered even if running microtasks was attempted
- * but the microtask queue was empty and no microtask was actually
- * executed.
- *
- * Executing scripts inside the callback will not re-trigger microtasks and
- * the callback.
- */
- virtual void AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
-
- /**
- * Removes callback that was installed by AddMicrotasksCompletedCallback.
- */
- virtual void RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
-
- /**
- * Runs microtasks if no microtask is running on this MicrotaskQueue instance.
- */
- virtual void PerformCheckpoint(Isolate* isolate) = 0;
-
- /**
- * Returns true if a microtask is running on this MicrotaskQueue instance.
- */
- virtual bool IsRunningMicrotasks() const = 0;
-
- /**
- * Returns the current depth of nested MicrotasksScope that has
- * kRunMicrotasks.
- */
- virtual int GetMicrotasksScopeDepth() const = 0;
-
- MicrotaskQueue(const MicrotaskQueue&) = delete;
- MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
-
- private:
- friend class internal::MicrotaskQueue;
- MicrotaskQueue() = default;
-};
-
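// Editorial sketch (not part of the original header): driving an explicit
// microtask queue. `isolate` is assumed; associating the queue with a Context
// is left out.
std::unique_ptr<v8::MicrotaskQueue> queue =
    v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
queue->EnqueueMicrotask(isolate,
                        [](void* data) { /* run embedder work */ },
                        /*data=*/nullptr);
queue->PerformCheckpoint(isolate);  // Runs pending microtasks, if any.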
-/**
- * This scope is used to control microtasks when MicrotasksPolicy::kScoped
- * is used on Isolate. In this mode every non-primitive call to V8 should be
- * done inside some MicrotasksScope.
- * Microtasks are executed when the topmost MicrotasksScope marked as
- * kRunMicrotasks exits.
- * kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
- * microtasks.
- */
-class V8_EXPORT V8_NODISCARD MicrotasksScope {
- public:
- enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
-
- MicrotasksScope(Isolate* isolate, Type type);
- MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
- ~MicrotasksScope();
-
- /**
- * Runs microtasks if no kRunMicrotasks scope is currently active.
- */
- static void PerformCheckpoint(Isolate* isolate);
-
- /**
- * Returns current depth of nested kRunMicrotasks scopes.
- */
- static int GetCurrentDepth(Isolate* isolate);
-
- /**
- * Returns true while microtasks are being executed.
- */
- static bool IsRunningMicrotasks(Isolate* isolate);
-
- // Prevent copying.
- MicrotasksScope(const MicrotasksScope&) = delete;
- MicrotasksScope& operator=(const MicrotasksScope&) = delete;
-
- private:
- internal::Isolate* const isolate_;
- internal::MicrotaskQueue* const microtask_queue_;
- bool run_;
-};
-
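// Editorial sketch (not part of the original header): using MicrotasksScope
// under MicrotasksPolicy::kScoped. `isolate`, `context`, and `function` are
// assumed to exist.
{
  v8::MicrotasksScope scope(isolate, v8::MicrotasksScope::kRunMicrotasks);
  v8::Local<v8::Value> result;
  if (function->Call(context, context->Global(), 0, nullptr)
          .ToLocal(&result)) {
    // Use `result`.
  }
}  // Pending microtasks run here, when the outermost kRunMicrotasks scope exits.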
-// --- Failed Access Check Callback ---
-using FailedAccessCheckCallback = void (*)(Local<Object> target,
- AccessType type, Local<Value> data);
-
-// --- AllowCodeGenerationFromStrings callbacks ---
-
-/**
- * Callback to check if code generation from strings is allowed. See
- * Context::AllowCodeGenerationFromStrings.
- */
-using AllowCodeGenerationFromStringsCallback = bool (*)(Local<Context> context,
- Local<String> source);
-
-struct ModifyCodeGenerationFromStringsResult {
- // If true, proceed with the codegen algorithm. Otherwise, block it.
- bool codegen_allowed = false;
- // Overwrite the original source with this string, if present.
- // Use the original source if empty.
- // This field is considered only if codegen_allowed is true.
- MaybeLocal<String> modified_source;
-};
-
-/**
- * Callback to check if codegen is allowed from a source object, and convert
- * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
- */
-using ModifyCodeGenerationFromStringsCallback =
- ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
- Local<Value> source);
-using ModifyCodeGenerationFromStringsCallback2 =
- ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
- Local<Value> source,
- bool is_code_like);
-
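// Editorial sketch (not part of the original header): a callback matching
// ModifyCodeGenerationFromStringsCallback2. It allows codegen for code-like
// objects and plain strings without rewriting the source.
#include "v8.h"

v8::ModifyCodeGenerationFromStringsResult AllowCodeGen(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source,
    bool is_code_like) {
  v8::ModifyCodeGenerationFromStringsResult result;
  result.codegen_allowed = is_code_like || source->IsString();
  // Leaving `modified_source` empty means the original source is used.
  return result;
}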
-// --- WebAssembly compilation callbacks ---
-using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&);
-
-using AllowWasmCodeGenerationCallback = bool (*)(Local<Context> context,
- Local<String> source);
-
-// --- Callback for APIs defined on v8-supported objects, but implemented
-// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
-using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&);
-
-// --- Callback for WebAssembly.compileStreaming ---
-using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&);
-
-// --- Callback for loading source map file for Wasm profiling support
-using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
- const char* name);
-
-// --- Callback for checking if WebAssembly Simd is enabled ---
-using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
-
-// --- Callback for checking if WebAssembly exceptions are enabled ---
-using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
-
-// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
-using SharedArrayBufferConstructorEnabledCallback =
- bool (*)(Local<Context> context);
-
-// --- Garbage Collection Callbacks ---
-
-/**
- * Applications can register callback functions which will be called before and
- * after certain garbage collection operations. Allocations are not allowed in
- * the callback functions; you therefore cannot manipulate objects (set or
- * delete properties, for example) since it is possible such operations will
- * result in the allocation of objects.
- */
-enum GCType {
- kGCTypeScavenge = 1 << 0,
- kGCTypeMarkSweepCompact = 1 << 1,
- kGCTypeIncrementalMarking = 1 << 2,
- kGCTypeProcessWeakCallbacks = 1 << 3,
- kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
- kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
-};
-
-/**
- * GCCallbackFlags is used to notify additional information about the GC
- * callback.
- * - kGCCallbackFlagConstructRetainedObjectInfos: The GC callback is for
- * constructing retained object infos.
- * - kGCCallbackFlagForced: The GC callback is for a forced GC for testing.
- * - kGCCallbackFlagSynchronousPhantomCallbackProcessing: The GC callback
- * is called synchronously without getting posted to an idle task.
- * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
- * in a phase where V8 is trying to collect all available garbage
- * (e.g., handling a low memory notification).
- * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to
- * trigger an idle garbage collection.
- */
-enum GCCallbackFlags {
- kNoGCCallbackFlags = 0,
- kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
- kGCCallbackFlagForced = 1 << 2,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
- kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
- kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
- kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
-};
-
-using GCCallback = void (*)(GCType type, GCCallbackFlags flags);
-
-using InterruptCallback = void (*)(Isolate* isolate, void* data);
-
-/**
- * This callback is invoked when the heap size is close to the heap limit and
- * V8 is likely to abort with out-of-memory error.
- * The callback can extend the heap limit by returning a value that is greater
- * than the current_heap_limit. The initial heap limit is the limit that was
- * set after heap setup.
- */
-using NearHeapLimitCallback = size_t (*)(void* data, size_t current_heap_limit,
- size_t initial_heap_limit);
-
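// Editorial sketch (not part of the original header): a near-heap-limit
// callback that extends the limit by 50%. Registration through
// Isolate::AddNearHeapLimitCallback is assumed.
#include "v8.h"

size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
                       size_t initial_heap_limit) {
  // Returning a value larger than current_heap_limit raises the limit.
  return current_heap_limit + current_heap_limit / 2;
}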
-/**
- * Collection of shared per-process V8 memory information.
- *
- * Instances of this class can be passed to
- * v8::V8::GetSharedMemoryStatistics to get shared memory statistics from V8.
- */
-class V8_EXPORT SharedMemoryStatistics {
- public:
- SharedMemoryStatistics();
- size_t read_only_space_size() { return read_only_space_size_; }
- size_t read_only_space_used_size() { return read_only_space_used_size_; }
- size_t read_only_space_physical_size() {
- return read_only_space_physical_size_;
- }
-
- private:
- size_t read_only_space_size_;
- size_t read_only_space_used_size_;
- size_t read_only_space_physical_size_;
-
- friend class V8;
- friend class internal::ReadOnlyHeap;
-};
-
-/**
- * Collection of V8 heap information.
- *
- * Instances of this class can be passed to v8::Isolate::GetHeapStatistics to
- * get heap statistics from V8.
- */
-class V8_EXPORT HeapStatistics {
- public:
- HeapStatistics();
- size_t total_heap_size() { return total_heap_size_; }
- size_t total_heap_size_executable() { return total_heap_size_executable_; }
- size_t total_physical_size() { return total_physical_size_; }
- size_t total_available_size() { return total_available_size_; }
- size_t total_global_handles_size() { return total_global_handles_size_; }
- size_t used_global_handles_size() { return used_global_handles_size_; }
- size_t used_heap_size() { return used_heap_size_; }
- size_t heap_size_limit() { return heap_size_limit_; }
- size_t malloced_memory() { return malloced_memory_; }
- size_t external_memory() { return external_memory_; }
- size_t peak_malloced_memory() { return peak_malloced_memory_; }
- size_t number_of_native_contexts() { return number_of_native_contexts_; }
- size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
-
- /**
- * Returns a 0/1 boolean, which signifies whether V8 overwrites heap
- * garbage with a bit pattern.
- */
- size_t does_zap_garbage() { return does_zap_garbage_; }
-
- private:
- size_t total_heap_size_;
- size_t total_heap_size_executable_;
- size_t total_physical_size_;
- size_t total_available_size_;
- size_t used_heap_size_;
- size_t heap_size_limit_;
- size_t malloced_memory_;
- size_t external_memory_;
- size_t peak_malloced_memory_;
- bool does_zap_garbage_;
- size_t number_of_native_contexts_;
- size_t number_of_detached_contexts_;
- size_t total_global_handles_size_;
- size_t used_global_handles_size_;
-
- friend class V8;
- friend class Isolate;
-};
-
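// Editorial sketch (not part of the original header): reading heap statistics.
// `isolate` is assumed; Isolate::GetHeapStatistics fills the struct in place.
v8::HeapStatistics heap_stats;
isolate->GetHeapStatistics(&heap_stats);
size_t used = heap_stats.used_heap_size();
size_t limit = heap_stats.heap_size_limit();
// e.g. log `used` against `limit`, or react to memory pressure.
(void)used;
(void)limit;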
-
-class V8_EXPORT HeapSpaceStatistics {
- public:
- HeapSpaceStatistics();
- const char* space_name() { return space_name_; }
- size_t space_size() { return space_size_; }
- size_t space_used_size() { return space_used_size_; }
- size_t space_available_size() { return space_available_size_; }
- size_t physical_space_size() { return physical_space_size_; }
-
- private:
- const char* space_name_;
- size_t space_size_;
- size_t space_used_size_;
- size_t space_available_size_;
- size_t physical_space_size_;
-
- friend class Isolate;
-};
-
-
-class V8_EXPORT HeapObjectStatistics {
- public:
- HeapObjectStatistics();
- const char* object_type() { return object_type_; }
- const char* object_sub_type() { return object_sub_type_; }
- size_t object_count() { return object_count_; }
- size_t object_size() { return object_size_; }
-
- private:
- const char* object_type_;
- const char* object_sub_type_;
- size_t object_count_;
- size_t object_size_;
-
- friend class Isolate;
-};
-
-class V8_EXPORT HeapCodeStatistics {
- public:
- HeapCodeStatistics();
- size_t code_and_metadata_size() { return code_and_metadata_size_; }
- size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; }
- size_t external_script_source_size() { return external_script_source_size_; }
-
- private:
- size_t code_and_metadata_size_;
- size_t bytecode_and_metadata_size_;
- size_t external_script_source_size_;
-
- friend class Isolate;
-};
-
-/**
- * A JIT code event is issued each time code is added, moved or removed.
- *
- * \note removal events are not currently issued.
- */
-struct JitCodeEvent {
- enum EventType {
- CODE_ADDED,
- CODE_MOVED,
- CODE_REMOVED,
- CODE_ADD_LINE_POS_INFO,
- CODE_START_LINE_INFO_RECORDING,
- CODE_END_LINE_INFO_RECORDING
- };
- // Definition of the code position type. The "POSITION" type means the places
- // in the source code which are of interest when making stack traces to
- // pinpoint the source location of a stack frame as closely as possible.
- // The "STATEMENT_POSITION" means the place at the beginning of each
- // statement, and is used to indicate possible break locations.
- enum PositionType { POSITION, STATEMENT_POSITION };
-
- // There are two different kinds of JitCodeEvents, one for JIT code generated
- // by the optimizing compiler, and one for byte code generated for the
- // interpreter. For JIT_CODE events, the |code_start| member of the event
- // points to the beginning of jitted assembly code, while for BYTE_CODE
- // events, |code_start| points to the first bytecode of the interpreted
- // function.
- enum CodeType { BYTE_CODE, JIT_CODE };
-
- // Type of event.
- EventType type;
- CodeType code_type;
- // Start of the instructions.
- void* code_start;
- // Size of the instructions.
- size_t code_len;
- // Script info for CODE_ADDED event.
- Local<UnboundScript> script;
- // User-defined data for *_LINE_INFO_* event. It's used to hold the source
- // code line information which is returned from the
- // CODE_START_LINE_INFO_RECORDING event, and is passed to subsequent
- // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
- void* user_data;
-
- struct name_t {
- // Name of the object associated with the code; note that the string is not
- // zero-terminated.
- const char* str;
- // Number of chars in str.
- size_t len;
- };
-
- struct line_info_t {
- // PC offset
- size_t offset;
- // Code position
- size_t pos;
- // The position type.
- PositionType position_type;
- };
-
- struct wasm_source_info_t {
- // Source file name.
- const char* filename;
- // Length of filename.
- size_t filename_size;
- // Line number table, which maps offsets of JITted code to line numbers of
- // source file.
- const line_info_t* line_number_table;
- // Number of entries in the line number table.
- size_t line_number_table_size;
- };
-
- wasm_source_info_t* wasm_source_info;
-
- union {
- // Only valid for CODE_ADDED.
- struct name_t name;
-
- // Only valid for CODE_ADD_LINE_POS_INFO
- struct line_info_t line_info;
-
- // New location of instructions. Only valid for CODE_MOVED.
- void* new_code_start;
- };
-
- Isolate* isolate;
-};
-
-/**
- * Option flags passed to the SetRAILMode function.
- * See documentation https://developers.google.com/web/tools/chrome-devtools/
- * profile/evaluate-performance/rail
- */
-enum RAILMode : unsigned {
- // Response performance mode: In this mode very low virtual machine latency
- // is provided. V8 will try to avoid JavaScript execution interruptions.
- // Throughput may be throttled.
- PERFORMANCE_RESPONSE,
- // Animation performance mode: In this mode low virtual machine latency is
- // provided. V8 will try to avoid as many JavaScript execution interruptions
- // as possible. Throughput may be throttled. This is the default mode.
- PERFORMANCE_ANIMATION,
- // Idle performance mode: The embedder is idle. V8 can complete deferred work
- // in this mode.
- PERFORMANCE_IDLE,
- // Load performance mode: In this mode high throughput is provided. V8 may
- // turn off latency optimizations.
- PERFORMANCE_LOAD
-};
-
-/**
- * Option flags passed to the SetJitCodeEventHandler function.
- */
-enum JitCodeEventOptions {
- kJitCodeEventDefault = 0,
- // Generate callbacks for already existent code.
- kJitCodeEventEnumExisting = 1
-};
-
-
-/**
- * Callback function passed to SetJitCodeEventHandler.
- *
- * \param event code add, move or removal event.
- */
-using JitCodeEventHandler = void (*)(const JitCodeEvent* event);
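-
- // A hedged usage sketch (embedder-side code, shown inline for illustration):
- // a JitCodeEventHandler that logs CODE_ADDED events using only the
- // JitCodeEvent fields declared above; <cstdio> is assumed for the logging.
- void OnJitCodeEvent(const JitCodeEvent* event) {
-   if (event->type != JitCodeEvent::CODE_ADDED) return;
-   // |name.str| is not zero-terminated, so its length is passed explicitly.
-   std::printf("code added: %.*s at %p (%zu bytes)\n",
-               static_cast<int>(event->name.len), event->name.str,
-               event->code_start, event->code_len);
- }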
-
-/**
- * Callback function passed to SetUnhandledExceptionCallback.
- */
-#if defined(V8_OS_WIN)
-using UnhandledExceptionCallback =
- int (*)(_EXCEPTION_POINTERS* exception_pointers);
-#endif
-
-/**
- * Interface for iterating through all external resources in the heap.
- */
-class V8_EXPORT ExternalResourceVisitor {
- public:
- virtual ~ExternalResourceVisitor() = default;
- virtual void VisitExternalString(Local<String> string) {}
-};
-
-/**
- * Interface for iterating through all the persistent handles in the heap.
- */
-class V8_EXPORT PersistentHandleVisitor {
- public:
- virtual ~PersistentHandleVisitor() = default;
- virtual void VisitPersistentHandle(Persistent<Value>* value,
- uint16_t class_id) {}
-};
-
-/**
- * Memory pressure level for the MemoryPressureNotification.
- * kNone hints V8 that there is no memory pressure.
- * kModerate hints V8 to speed up incremental garbage collection at the cost
- * of higher latency due to garbage collection pauses.
- * kCritical hints V8 to free memory as soon as possible. Garbage collection
- * pauses at this level will be large.
- */
-enum class MemoryPressureLevel { kNone, kModerate, kCritical };
-
-/**
- * Handler for embedder roots on non-unified heap garbage collections.
- */
-class V8_EXPORT EmbedderRootsHandler {
- public:
- virtual ~EmbedderRootsHandler() = default;
-
- /**
- * Returns true if the TracedGlobal handle should be considered as root for
- * the currently running non-tracing garbage collection and false otherwise.
- * The default implementation will keep all TracedGlobal references as roots.
- *
- * If this returns false, then V8 may decide that the object referred to by
- * such a handle is reclaimed. In that case:
- * - No action is required if handles are used with destructors, i.e., by just
- * using |TracedGlobal|.
- * - When run without destructors, i.e., by using |TracedReference|, V8 calls
- * |ResetRoot|.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. The embedder may use |WrapperClassId()| to
- * distinguish cases where it wants handles to be treated as roots from not
- * being treated as roots.
- */
- virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
- virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
-
- /**
- * Used in combination with |IsRoot|. Called by V8 when an
- * object that is backed by a handle is reclaimed by a non-tracing garbage
- * collection. It is up to the embedder to reset the original handle.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. It is up to the embedder to find the original
- * handle via the object or class id.
- */
- virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
-};
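-
- // A hedged sketch of an EmbedderRootsHandler implementation (embedder-side
- // code, shown inline for illustration): it treats no handle as a root,
- // allowing V8 to reclaim the wrapped objects; the bookkeeping hinted at in
- // ResetRoot is embedder-specific.
- class DroppingRootsHandler final : public v8::EmbedderRootsHandler {
-  public:
-   bool IsRoot(const v8::TracedReference<v8::Value>& handle) override {
-     return false;  // Object may be reclaimed by non-tracing GCs.
-   }
-   bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) override {
-     return false;
-   }
-   void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
-     // Locate the embedder's own handle (e.g. via the wrapped object or its
-     // class id) and reset it here.
-   }
- };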
-
-/**
- * Interface for tracing through the embedder heap. During a V8 garbage
- * collection, V8 collects hidden fields of all potential wrappers, and at the
- * end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and use reporter to report each JavaScript object
- * reachable from any of the given wrappers.
- */
-class V8_EXPORT EmbedderHeapTracer {
- public:
- using EmbedderStackState = cppgc::EmbedderStackState;
-
- enum TraceFlags : uint64_t {
- kNoFlags = 0,
- kReduceMemory = 1 << 0,
- kForced = 1 << 2,
- };
-
- /**
- * Interface for iterating through TracedGlobal handles.
- */
- class V8_EXPORT TracedGlobalHandleVisitor {
- public:
- virtual ~TracedGlobalHandleVisitor() = default;
- virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
- virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
- };
-
- /**
- * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
- * summary is reported.
- */
- struct TraceSummary {
- /**
- * Time spent managing the retained memory in milliseconds. This can e.g.
- * include the time tracing through objects in the embedder.
- */
- double time = 0.0;
-
- /**
- * Memory retained by the embedder through the |EmbedderHeapTracer|
- * mechanism in bytes.
- */
- size_t allocated_size = 0;
- };
-
- virtual ~EmbedderHeapTracer() = default;
-
- /**
- * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
- * attached to.
- */
- void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
-
- /**
- * Called by the embedder to set the start of the stack which is e.g. used by
- * V8 to determine whether handles are used from stack or heap.
- */
- void SetStackStart(void* stack_start);
-
- /**
- * Called by the embedder to notify V8 of an empty execution stack.
- */
- V8_DEPRECATE_SOON(
- "This call only optimized internal caches which V8 is able to figure out "
- "on its own now.")
- void NotifyEmptyEmbedderStack();
-
- /**
- * Called by v8 to register internal fields of found wrappers.
- *
- * The embedder is expected to store them somewhere and trace reachable
- * wrappers from them when called through |AdvanceTracing|.
- */
- virtual void RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) = 0;
-
- void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
-
- /**
- * Called at the beginning of a GC cycle.
- */
- virtual void TracePrologue(TraceFlags flags) {}
-
- /**
- * Called to advance tracing in the embedder.
- *
- * The embedder is expected to trace its heap starting from wrappers reported
- * by RegisterV8References method, and report back all reachable wrappers.
- * Furthermore, the embedder is expected to stop tracing by the given
- * deadline. A deadline of infinity means that tracing should be finished.
- *
- * Returns |true| if tracing is done, and false otherwise.
- */
- virtual bool AdvanceTracing(double deadline_in_ms) = 0;
-
- /*
- * Returns true if there is no more tracing work to be done (see
- * AdvanceTracing) and false otherwise.
- */
- virtual bool IsTracingDone() = 0;
-
- /**
- * Called at the end of a GC cycle.
- *
- * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
- * overridden to fill a |TraceSummary| that is used by V8 to schedule future
- * garbage collections.
- */
- virtual void TraceEpilogue(TraceSummary* trace_summary) {}
-
- /**
- * Called upon entering the final marking pause. No more incremental marking
- * steps will follow this call.
- */
- virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
-
- /*
- * Called by the embedder to request immediate finalization of the currently
- * running tracing phase that has been started with TracePrologue and not
- * yet finished with TraceEpilogue.
- *
- * Will be a no-op when tracing is not currently in progress.
- *
- * This is an experimental feature.
- */
- void FinalizeTracing();
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
- virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
-
- /*
- * Called by the embedder to immediately perform a full garbage collection.
- *
- * Should only be used in testing code.
- */
- void GarbageCollectionForTesting(EmbedderStackState stack_state);
-
- /*
- * Called by the embedder to signal newly allocated or freed memory. Not bound
- * to tracing phases. Embedders should trade off when increments are reported
- * as V8 may consult global heuristics on whether to trigger garbage
- * collection on this change.
- */
- void IncreaseAllocatedSize(size_t bytes);
- void DecreaseAllocatedSize(size_t bytes);
-
- /*
- * Returns the v8::Isolate this tracer is attached to, and |nullptr| if it
- * is not attached to any v8::Isolate.
- */
- v8::Isolate* isolate() const { return isolate_; }
-
- protected:
- v8::Isolate* isolate_ = nullptr;
-
- friend class internal::LocalEmbedderHeapTracer;
-};
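-
- // A hedged sketch of an EmbedderHeapTracer subclass (embedder-side code,
- // shown inline for illustration): it records the wrapper pairs reported by
- // V8 and finishes tracing immediately; a real embedder would walk its own
- // heap in AdvanceTracing. Assumes <utility> and <vector>.
- class MinimalHeapTracer final : public v8::EmbedderHeapTracer {
-  public:
-   void RegisterV8References(
-       const std::vector<std::pair<void*, void*> >& embedder_fields) override {
-     wrappers_.insert(wrappers_.end(), embedder_fields.begin(),
-                      embedder_fields.end());
-   }
-   bool AdvanceTracing(double deadline_in_ms) override {
-     wrappers_.clear();  // Nothing further to trace in this sketch.
-     return true;
-   }
-   bool IsTracingDone() override { return wrappers_.empty(); }
-   void EnterFinalPause(EmbedderStackState stack_state) override {}
-
-  private:
-   std::vector<std::pair<void*, void*> > wrappers_;
- };
- // Typically attached via isolate->SetEmbedderHeapTracer(&tracer);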
-
-/**
- * Callback and supporting data used in SnapshotCreator to implement embedder
- * logic to serialize internal fields.
- * Internal fields that directly reference V8 objects are serialized without
- * calling this callback. Internal fields that contain aligned pointers are
- * serialized by this callback if it returns a non-zero result. Otherwise the
- * field is serialized verbatim.
- */
-struct SerializeInternalFieldsCallback {
- using CallbackFunction = StartupData (*)(Local<Object> holder, int index,
- void* data);
- SerializeInternalFieldsCallback(CallbackFunction function = nullptr,
- void* data_arg = nullptr)
- : callback(function), data(data_arg) {}
- CallbackFunction callback;
- void* data;
-};
-// Note that these fields are called "internal fields" in the API and called
-// "embedder fields" within V8.
-using SerializeEmbedderFieldsCallback = SerializeInternalFieldsCallback;
-
-/**
- * Callback and supporting data used to implement embedder logic to deserialize
- * internal fields.
- */
-struct DeserializeInternalFieldsCallback {
- using CallbackFunction = void (*)(Local<Object> holder, int index,
- StartupData payload, void* data);
- DeserializeInternalFieldsCallback(CallbackFunction function = nullptr,
- void* data_arg = nullptr)
- : callback(function), data(data_arg) {}
- void (*callback)(Local<Object> holder, int index, StartupData payload,
- void* data);
- void* data;
-};
-using DeserializeEmbedderFieldsCallback = DeserializeInternalFieldsCallback;
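-
- // A hedged sketch of a matching serialize/deserialize callback pair
- // (embedder-side code, shown inline for illustration). It assumes a
- // hypothetical, trivially-copyable EmbedderState stored as an aligned
- // pointer in the serialized internal field, StartupData's {data, raw_size}
- // members, and <cstring>; buffer ownership and error handling are glossed
- // over here.
- StartupData SerializeEmbedderField(Local<Object> holder, int index,
-                                    void* data) {
-   auto* state = static_cast<EmbedderState*>(
-       holder->GetAlignedPointerFromInternalField(index));
-   if (state == nullptr) return {nullptr, 0};  // Field is handled verbatim.
-   char* payload = new char[sizeof(EmbedderState)];
-   std::memcpy(payload, state, sizeof(EmbedderState));
-   return {payload, static_cast<int>(sizeof(EmbedderState))};
- }
-
- void DeserializeEmbedderField(Local<Object> holder, int index,
-                               StartupData payload, void* data) {
-   auto* state = new EmbedderState();
-   std::memcpy(state, payload.data, sizeof(EmbedderState));
-   holder->SetAlignedPointerInInternalField(index, state);
- }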
-
-/**
- * Controls how the default MeasureMemoryDelegate reports the result of
- * the memory measurement to JS. With kSummary only the total size is reported.
- * With kDetailed the result includes the size of each native context.
- */
-enum class MeasureMemoryMode { kSummary, kDetailed };
-
-/**
- * Controls how promptly a memory measurement request is executed.
- * By default the measurement is folded with the next scheduled GC which may
- * happen after a while and is forced after some timeout.
- * The kEager mode starts incremental GC right away and is useful for testing.
- * The kLazy mode does not force GC.
- */
-enum class MeasureMemoryExecution { kDefault, kEager, kLazy };
-
-/**
- * The delegate is used in Isolate::MeasureMemory API.
- *
- * It specifies the contexts that need to be measured and gets called when
- * the measurement is completed to report the results.
- */
-class V8_EXPORT MeasureMemoryDelegate {
- public:
- virtual ~MeasureMemoryDelegate() = default;
-
- /**
- * Returns true if the size of the given context needs to be measured.
- */
- virtual bool ShouldMeasure(Local<Context> context) = 0;
-
- /**
- * This function is called when memory measurement finishes.
- *
- * \param context_sizes_in_bytes a vector of (context, size) pairs that
- * includes each context for which ShouldMeasure returned true and that
- * was not garbage collected while the memory measurement was in progress.
- *
- * \param unattributed_size_in_bytes total size of objects that were not
- * attributed to any context (i.e. are likely shared objects).
- */
- virtual void MeasurementComplete(
- const std::vector<std::pair<Local<Context>, size_t>>&
- context_sizes_in_bytes,
- size_t unattributed_size_in_bytes) = 0;
-
- /**
- * Returns a default delegate that resolves the given promise when
- * the memory measurement completes.
- *
- * \param isolate the current isolate
- * \param context the current context
- * \param promise_resolver the promise resolver that is given the
- * result of the memory measurement.
- * \param mode the detail level of the result.
- */
- static std::unique_ptr<MeasureMemoryDelegate> Default(
- Isolate* isolate, Local<Context> context,
- Local<Promise::Resolver> promise_resolver, MeasureMemoryMode mode);
-};
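-
- // A hedged sketch of a MeasureMemoryDelegate (embedder-side code, shown
- // inline for illustration) that measures every context and sums the reported
- // sizes; <cstdio> and <memory> are assumed, and the logging is illustrative.
- class SummingMemoryDelegate final : public v8::MeasureMemoryDelegate {
-  public:
-   bool ShouldMeasure(Local<Context> context) override { return true; }
-   void MeasurementComplete(
-       const std::vector<std::pair<Local<Context>, size_t>>&
-           context_sizes_in_bytes,
-       size_t unattributed_size_in_bytes) override {
-     size_t total = unattributed_size_in_bytes;
-     for (const auto& entry : context_sizes_in_bytes) total += entry.second;
-     std::printf("measured %zu bytes across %zu contexts\n", total,
-                 context_sizes_in_bytes.size());
-   }
- };
- // Enqueued via
- //   isolate->MeasureMemory(std::make_unique<SummingMemoryDelegate>());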
-
-/**
- * Isolate represents an isolated instance of the V8 engine. V8 isolates have
- * completely separate states. Objects from one isolate must not be used in
- * other isolates. The embedder can create multiple isolates and use them in
- * parallel in multiple threads. An isolate can be entered by at most one
- * thread at any given time. The Locker/Unlocker API must be used to
- * synchronize.
- */
-class V8_EXPORT Isolate {
- public:
- /**
- * Initial configuration parameters for a new Isolate.
- */
- struct V8_EXPORT CreateParams {
- CreateParams();
- ~CreateParams();
-
- /**
- * Allows the host application to provide the address of a function that is
- * notified each time code is added, moved or removed.
- */
- JitCodeEventHandler code_event_handler = nullptr;
-
- /**
- * ResourceConstraints to use for the new Isolate.
- */
- ResourceConstraints constraints;
-
- /**
- * Explicitly specify a startup snapshot blob. The embedder owns the blob.
- */
- StartupData* snapshot_blob = nullptr;
-
- /**
- * Enables the host application to provide a mechanism for recording
- * statistics counters.
- */
- CounterLookupCallback counter_lookup_callback = nullptr;
-
- /**
- * Enables the host application to provide a mechanism for recording
- * histograms. The CreateHistogram function returns a
- * histogram which will later be passed to the AddHistogramSample
- * function.
- */
- CreateHistogramCallback create_histogram_callback = nullptr;
- AddHistogramSampleCallback add_histogram_sample_callback = nullptr;
-
- /**
- * The ArrayBuffer::Allocator to use for allocating and freeing the backing
- * store of ArrayBuffers.
- *
- * If the shared_ptr version is used, the Isolate instance and every
- * |BackingStore| allocated using this allocator hold a std::shared_ptr
- * to the allocator, in order to facilitate lifetime
- * management for the allocator instance.
- */
- ArrayBuffer::Allocator* array_buffer_allocator = nullptr;
- std::shared_ptr<ArrayBuffer::Allocator> array_buffer_allocator_shared;
-
- /**
- * Specifies an optional nullptr-terminated array of raw addresses in the
- * embedder that V8 can match against during serialization and use for
- * deserialization. This array and its content must stay valid for the
- * entire lifetime of the isolate.
- */
- const intptr_t* external_references = nullptr;
-
- /**
- * Whether calling Atomics.wait (a function that may block) is allowed in
- * this isolate. This can also be configured via SetAllowAtomicsWait.
- */
- bool allow_atomics_wait = true;
-
- /**
- * Termination is postponed when there is no active SafeForTerminationScope.
- */
- bool only_terminate_in_safe_scope = false;
-
- /**
- * The following parameters describe the offsets for addressing type info
- * for wrapped API objects and are used by the fast C API
- * (for details see v8-fast-api-calls.h).
- */
- int embedder_wrapper_type_index = -1;
- int embedder_wrapper_object_index = -1;
- };
-
- /**
- * Stack-allocated class which sets the isolate for all operations
- * executed within a local scope.
- */
- class V8_EXPORT V8_NODISCARD Scope {
- public:
- explicit Scope(Isolate* isolate) : isolate_(isolate) {
- isolate->Enter();
- }
-
- ~Scope() { isolate_->Exit(); }
-
- // Prevent copying of Scope objects.
- Scope(const Scope&) = delete;
- Scope& operator=(const Scope&) = delete;
-
- private:
- Isolate* const isolate_;
- };
-
- /**
- * Assert that no Javascript code is invoked.
- */
- class V8_EXPORT V8_NODISCARD DisallowJavascriptExecutionScope {
- public:
- enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE, DUMP_ON_FAILURE };
-
- DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
- ~DisallowJavascriptExecutionScope();
-
- // Prevent copying of Scope objects.
- DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&) =
- delete;
- DisallowJavascriptExecutionScope& operator=(
- const DisallowJavascriptExecutionScope&) = delete;
-
- private:
- OnFailure on_failure_;
- Isolate* isolate_;
-
- bool was_execution_allowed_assert_;
- bool was_execution_allowed_throws_;
- bool was_execution_allowed_dump_;
- };
-
- /**
- * Introduce exception to DisallowJavascriptExecutionScope.
- */
- class V8_EXPORT V8_NODISCARD AllowJavascriptExecutionScope {
- public:
- explicit AllowJavascriptExecutionScope(Isolate* isolate);
- ~AllowJavascriptExecutionScope();
-
- // Prevent copying of Scope objects.
- AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&) =
- delete;
- AllowJavascriptExecutionScope& operator=(
- const AllowJavascriptExecutionScope&) = delete;
-
- private:
- Isolate* isolate_;
- bool was_execution_allowed_assert_;
- bool was_execution_allowed_throws_;
- bool was_execution_allowed_dump_;
- };
-
- /**
- * Do not run microtasks while this scope is active, even if microtasks are
- * automatically executed otherwise.
- */
- class V8_EXPORT V8_NODISCARD SuppressMicrotaskExecutionScope {
- public:
- explicit SuppressMicrotaskExecutionScope(
- Isolate* isolate, MicrotaskQueue* microtask_queue = nullptr);
- ~SuppressMicrotaskExecutionScope();
-
- // Prevent copying of Scope objects.
- SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&) =
- delete;
- SuppressMicrotaskExecutionScope& operator=(
- const SuppressMicrotaskExecutionScope&) = delete;
-
- private:
- internal::Isolate* const isolate_;
- internal::MicrotaskQueue* const microtask_queue_;
- internal::Address previous_stack_height_;
-
- friend class internal::ThreadLocalTop;
- };
-
- /**
- * This scope allows terminations inside direct V8 API calls and forbid them
- * inside any recursive API calls without explicit SafeForTerminationScope.
- */
- class V8_EXPORT V8_NODISCARD SafeForTerminationScope {
- public:
- explicit SafeForTerminationScope(v8::Isolate* isolate);
- ~SafeForTerminationScope();
-
- // Prevent copying of Scope objects.
- SafeForTerminationScope(const SafeForTerminationScope&) = delete;
- SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete;
-
- private:
- internal::Isolate* isolate_;
- bool prev_value_;
- };
-
- /**
- * Types of garbage collections that can be requested via
- * RequestGarbageCollectionForTesting.
- */
- enum GarbageCollectionType {
- kFullGarbageCollection,
- kMinorGarbageCollection
- };
-
- /**
- * Features reported via the SetUseCounterCallback callback. Do not change
- * assigned numbers of existing items; add new features to the end of this
- * list.
- */
- enum UseCounterFeature {
- kUseAsm = 0,
- kBreakIterator = 1,
- kLegacyConst = 2,
- kMarkDequeOverflow = 3,
- kStoreBufferOverflow = 4,
- kSlotsBufferOverflow = 5,
- kObjectObserve = 6,
- kForcedGC = 7,
- kSloppyMode = 8,
- kStrictMode = 9,
- kStrongMode = 10,
- kRegExpPrototypeStickyGetter = 11,
- kRegExpPrototypeToString = 12,
- kRegExpPrototypeUnicodeGetter = 13,
- kIntlV8Parse = 14,
- kIntlPattern = 15,
- kIntlResolved = 16,
- kPromiseChain = 17,
- kPromiseAccept = 18,
- kPromiseDefer = 19,
- kHtmlCommentInExternalScript = 20,
- kHtmlComment = 21,
- kSloppyModeBlockScopedFunctionRedefinition = 22,
- kForInInitializer = 23,
- kArrayProtectorDirtied = 24,
- kArraySpeciesModified = 25,
- kArrayPrototypeConstructorModified = 26,
- kArrayInstanceProtoModified = 27,
- kArrayInstanceConstructorModified = 28,
- kLegacyFunctionDeclaration = 29,
- kRegExpPrototypeSourceGetter = 30, // Unused.
- kRegExpPrototypeOldFlagGetter = 31, // Unused.
- kDecimalWithLeadingZeroInStrictMode = 32,
- kLegacyDateParser = 33,
- kDefineGetterOrSetterWouldThrow = 34,
- kFunctionConstructorReturnedUndefined = 35,
- kAssigmentExpressionLHSIsCallInSloppy = 36,
- kAssigmentExpressionLHSIsCallInStrict = 37,
- kPromiseConstructorReturnedUndefined = 38,
- kConstructorNonUndefinedPrimitiveReturn = 39,
- kLabeledExpressionStatement = 40,
- kLineOrParagraphSeparatorAsLineTerminator = 41,
- kIndexAccessor = 42,
- kErrorCaptureStackTrace = 43,
- kErrorPrepareStackTrace = 44,
- kErrorStackTraceLimit = 45,
- kWebAssemblyInstantiation = 46,
- kDeoptimizerDisableSpeculation = 47,
- kArrayPrototypeSortJSArrayModifiedPrototype = 48,
- kFunctionTokenOffsetTooLongForToString = 49,
- kWasmSharedMemory = 50,
- kWasmThreadOpcodes = 51,
- kAtomicsNotify = 52, // Unused.
- kAtomicsWake = 53, // Unused.
- kCollator = 54,
- kNumberFormat = 55,
- kDateTimeFormat = 56,
- kPluralRules = 57,
- kRelativeTimeFormat = 58,
- kLocale = 59,
- kListFormat = 60,
- kSegmenter = 61,
- kStringLocaleCompare = 62,
- kStringToLocaleUpperCase = 63,
- kStringToLocaleLowerCase = 64,
- kNumberToLocaleString = 65,
- kDateToLocaleString = 66,
- kDateToLocaleDateString = 67,
- kDateToLocaleTimeString = 68,
- kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
- kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
- kOptimizedFunctionWithOneShotBytecode = 71, // Unused.
- kRegExpMatchIsTrueishOnNonJSRegExp = 72,
- kRegExpMatchIsFalseishOnJSRegExp = 73,
- kDateGetTimezoneOffset = 74, // Unused.
- kStringNormalize = 75,
- kCallSiteAPIGetFunctionSloppyCall = 76,
- kCallSiteAPIGetThisSloppyCall = 77,
- kRegExpMatchAllWithNonGlobalRegExp = 78,
- kRegExpExecCalledOnSlowRegExp = 79,
- kRegExpReplaceCalledOnSlowRegExp = 80,
- kDisplayNames = 81,
- kSharedArrayBufferConstructed = 82,
- kArrayPrototypeHasElements = 83,
- kObjectPrototypeHasElements = 84,
- kNumberFormatStyleUnit = 85,
- kDateTimeFormatRange = 86,
- kDateTimeFormatDateTimeStyle = 87,
- kBreakIteratorTypeWord = 88,
- kBreakIteratorTypeLine = 89,
- kInvalidatedArrayBufferDetachingProtector = 90,
- kInvalidatedArrayConstructorProtector = 91,
- kInvalidatedArrayIteratorLookupChainProtector = 92,
- kInvalidatedArraySpeciesLookupChainProtector = 93,
- kInvalidatedIsConcatSpreadableLookupChainProtector = 94,
- kInvalidatedMapIteratorLookupChainProtector = 95,
- kInvalidatedNoElementsProtector = 96,
- kInvalidatedPromiseHookProtector = 97,
- kInvalidatedPromiseResolveLookupChainProtector = 98,
- kInvalidatedPromiseSpeciesLookupChainProtector = 99,
- kInvalidatedPromiseThenLookupChainProtector = 100,
- kInvalidatedRegExpSpeciesLookupChainProtector = 101,
- kInvalidatedSetIteratorLookupChainProtector = 102,
- kInvalidatedStringIteratorLookupChainProtector = 103,
- kInvalidatedStringLengthOverflowLookupChainProtector = 104,
- kInvalidatedTypedArraySpeciesLookupChainProtector = 105,
- kWasmSimdOpcodes = 106,
- kVarRedeclaredCatchBinding = 107,
- kWasmRefTypes = 108,
- kWasmBulkMemory = 109, // Unused.
- kWasmMultiValue = 110,
- kWasmExceptionHandling = 111,
- kInvalidatedMegaDOMProtector = 112,
-
- // If you add new values here, you'll also need to update Chromium's:
- // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
- // this list need to be landed first, then changes on the Chromium side.
- kUseCounterFeatureCount // This enum value must be last.
- };
-
- enum MessageErrorLevel {
- kMessageLog = (1 << 0),
- kMessageDebug = (1 << 1),
- kMessageInfo = (1 << 2),
- kMessageError = (1 << 3),
- kMessageWarning = (1 << 4),
- kMessageAll = kMessageLog | kMessageDebug | kMessageInfo | kMessageError |
- kMessageWarning,
- };
-
- using UseCounterCallback = void (*)(Isolate* isolate,
- UseCounterFeature feature);
-
- /**
- * Allocates a new isolate but does not initialize it. Does not change the
- * currently entered isolate.
- *
- * Only Isolate::GetData() and Isolate::SetData(), which access the
- * embedder-controlled parts of the isolate, are allowed to be called on the
- * uninitialized isolate. To initialize the isolate, call
- * Isolate::Initialize().
- *
- * When an isolate is no longer used its resources should be freed
- * by calling Dispose(). Using the delete operator is not allowed.
- *
- * V8::Initialize() must have run prior to this.
- */
- static Isolate* Allocate();
-
- /**
- * Initialize an Isolate previously allocated by Isolate::Allocate().
- */
- static void Initialize(Isolate* isolate, const CreateParams& params);
-
- /**
- * Creates a new isolate. Does not change the currently entered
- * isolate.
- *
- * When an isolate is no longer used its resources should be freed
- * by calling Dispose(). Using the delete operator is not allowed.
- *
- * V8::Initialize() must have run prior to this.
- */
- static Isolate* New(const CreateParams& params);
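-
- // A hedged usage sketch (embedder-side code, shown inline for illustration):
- // fill in CreateParams, create the isolate, enter it via Scope, and dispose
- // it. ArrayBuffer::Allocator::NewDefaultAllocator() from v8-array-buffer.h
- // is assumed, and V8::Initialize() must already have run.
- void RunWithFreshIsolate() {
-   Isolate::CreateParams params;
-   params.array_buffer_allocator =
-       ArrayBuffer::Allocator::NewDefaultAllocator();
-   Isolate* isolate = Isolate::New(params);
-   {
-     Isolate::Scope isolate_scope(isolate);  // Enter() now, Exit() on leaving.
-     // ... create contexts and run scripts here ...
-   }
-   isolate->Dispose();
-   delete params.array_buffer_allocator;
- }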
-
- /**
- * Returns the entered isolate for the current thread or NULL in
- * case there is no current isolate.
- *
- * This method must not be invoked before V8::Initialize() was invoked.
- */
- static Isolate* GetCurrent();
-
- /**
- * Returns the entered isolate for the current thread or NULL in
- * case there is no current isolate.
- *
- * No checks are performed by this method.
- */
- static Isolate* TryGetCurrent();
-
- /**
- * Clears the set of objects held strongly by the heap. This set of
- * objects are originally built when a WeakRef is created or
- * successfully dereferenced.
- *
- * This is invoked automatically after microtasks are run. See
- * MicrotasksPolicy for when microtasks are run.
- *
- * This needs to be manually invoked only if the embedder is manually running
- * microtasks via a custom MicrotaskQueue class's PerformCheckpoint. In that
- * case, it is the embedder's responsibility to make this call at a time which
- * does not interrupt synchronous ECMAScript code execution.
- */
- void ClearKeptObjects();
-
- /**
- * Custom callback used by embedders to help V8 determine if it should abort
- * when it throws and no internal handler is predicted to catch the
- * exception. If --abort-on-uncaught-exception is used on the command line,
- * then V8 will abort if either:
- * - no custom callback is set.
- * - the custom callback that was set returns true.
- * Otherwise, the custom callback will not be called and V8 will not abort.
- */
- using AbortOnUncaughtExceptionCallback = bool (*)(Isolate*);
- void SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback);
-
- /**
- * This specifies the callback called by the upcoming dynamic
- * import() language feature to load modules.
- */
- V8_DEPRECATED(
- "Use the version of SetHostImportModuleDynamicallyCallback that takes a "
- "HostImportModuleDynamicallyWithImportAssertionsCallback instead")
- void SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyCallback callback);
-
- /**
- * This specifies the callback called by the upcoming dynamic
- * import() language feature to load modules.
- */
- void SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyWithImportAssertionsCallback callback);
-
- /**
- * This specifies the callback called by the upcoming import.meta
- * language feature to retrieve host-defined meta data for a module.
- */
- void SetHostInitializeImportMetaObjectCallback(
- HostInitializeImportMetaObjectCallback callback);
-
- /**
- * This specifies the callback called when the stack property of Error
- * is accessed.
- */
- void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
-
- /**
- * Optional notification that the system is running low on memory.
- * V8 uses these notifications to guide heuristics.
- * It is allowed to call this function from another thread while
- * the isolate is executing long running JavaScript code.
- */
- void MemoryPressureNotification(MemoryPressureLevel level);
-
- /**
- * Drop non-essential caches. Should only be called from testing code.
- * The method can potentially block for a long time and does not necessarily
- * trigger GC.
- */
- void ClearCachesForTesting();
-
- /**
- * Methods below this point require holding a lock (using Locker) in
- * a multi-threaded environment.
- */
-
- /**
- * Sets this isolate as the entered one for the current thread.
- * Saves the previously entered one (if any), so that it can be
- * restored when exiting. Re-entering an isolate is allowed.
- */
- void Enter();
-
- /**
- * Exits this isolate by restoring the previously entered one in the
- * current thread. The isolate may still stay the same, if it was
- * entered more than once.
- *
- * Requires: this == Isolate::GetCurrent().
- */
- void Exit();
-
- /**
- * Disposes the isolate. The isolate must not be entered by any
- * thread to be disposable.
- */
- void Dispose();
-
- /**
- * Dumps activated low-level V8 internal stats. This can be used instead
- * of performing a full isolate disposal.
- */
- void DumpAndResetStats();
-
- /**
- * Discards all V8 thread-specific data for the Isolate. Should be used
- * if a thread is terminating and it has used an Isolate that will outlive
- * the thread -- all thread-specific data for an Isolate is discarded when
- * an Isolate is disposed so this call is pointless if an Isolate is about
- * to be Disposed.
- */
- void DiscardThreadSpecificMetadata();
-
- /**
- * Associate embedder-specific data with the isolate. |slot| has to be
- * between 0 and GetNumberOfDataSlots() - 1.
- */
- V8_INLINE void SetData(uint32_t slot, void* data);
-
- /**
- * Retrieve embedder-specific data from the isolate.
- * Returns NULL if SetData has never been called for the given |slot|.
- */
- V8_INLINE void* GetData(uint32_t slot);
-
- /**
- * Returns the maximum number of available embedder data slots. Valid slots
- * are in the range 0 to GetNumberOfDataSlots() - 1.
- */
- V8_INLINE static uint32_t GetNumberOfDataSlots();
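-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of the data-slot API above; the EmbedderPerIsolate type and the use of
- // slot 0 are assumptions.
- struct EmbedderPerIsolate { int counter = 0; };
-
- void AttachEmbedderData(Isolate* isolate) {
-   // Slot 0 is valid as long as GetNumberOfDataSlots() returns at least 1.
-   if (Isolate::GetNumberOfDataSlots() > 0)
-     isolate->SetData(0, new EmbedderPerIsolate());
- }
-
- EmbedderPerIsolate* GetEmbedderData(Isolate* isolate) {
-   return static_cast<EmbedderPerIsolate*>(isolate->GetData(0));
- }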
-
- /**
- * Return data that was previously attached to the isolate snapshot via
- * SnapshotCreator, and removes the reference to it.
- * Repeated call with the same index returns an empty MaybeLocal.
- */
- template <class T>
- V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
-
- /**
- * Get statistics about the heap memory usage.
- */
- void GetHeapStatistics(HeapStatistics* heap_statistics);
-
- /**
- * Returns the number of spaces in the heap.
- */
- size_t NumberOfHeapSpaces();
-
- /**
- * Get the memory usage of a space in the heap.
- *
- * \param space_statistics The HeapSpaceStatistics object to fill in
- * statistics.
- * \param index The index of the space to get statistics from, which ranges
- * from 0 to NumberOfHeapSpaces() - 1.
- * \returns true on success.
- */
- bool GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
- size_t index);
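-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // that walks all heap spaces; it assumes the HeapSpaceStatistics accessors
- // space_name() and space_used_size() declared earlier in this header, and
- // <cstdio> for logging.
- void DumpHeapSpaces(Isolate* isolate) {
-   HeapSpaceStatistics stats;
-   for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
-     if (!isolate->GetHeapSpaceStatistics(&stats, i)) continue;
-     std::printf("%s: %zu bytes used\n", stats.space_name(),
-                 stats.space_used_size());
-   }
- }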
-
- /**
- * Returns the number of types of objects tracked in the heap at GC.
- */
- size_t NumberOfTrackedHeapObjectTypes();
-
- /**
- * Get statistics about objects in the heap.
- *
- * \param object_statistics The HeapObjectStatistics object to fill in
- * statistics of objects of given type, which were live in the previous GC.
- * \param type_index The index of the type of object to fill details about,
- * which ranges from 0 to NumberOfTrackedHeapObjectTypes() - 1.
- * \returns true on success.
- */
- bool GetHeapObjectStatisticsAtLastGC(HeapObjectStatistics* object_statistics,
- size_t type_index);
-
- /**
- * Get statistics about code and its metadata in the heap.
- *
- * \param object_statistics The HeapCodeStatistics object to fill in
- * statistics of code, bytecode and their metadata.
- * \returns true on success.
- */
- bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
-
- /**
- * This API is experimental and may change significantly.
- *
- * Enqueues a memory measurement request and invokes the delegate with the
- * results.
- *
- * \param delegate the delegate that defines which contexts to measure and
- * reports the results.
- *
- * \param execution the promptness with which to execute the memory
- * measurement. The kEager value is expected to be used only in tests.
- */
- bool MeasureMemory(
- std::unique_ptr<MeasureMemoryDelegate> delegate,
- MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault);
-
- /**
- * Get a call stack sample from the isolate.
- * \param state Execution state.
- * \param frames Caller allocated buffer to store stack frames.
- * \param frames_limit Maximum number of frames to capture. The buffer must
- * be large enough to hold the number of frames.
- * \param sample_info The sample info is filled in by the function and
- * provides the number of actually captured stack frames and
- * the current VM state.
- * \note GetStackSample should only be called when the JS thread is paused or
- * interrupted. Otherwise the behavior is undefined.
- */
- void GetStackSample(const RegisterState& state, void** frames,
- size_t frames_limit, SampleInfo* sample_info);
-
- /**
- * Adjusts the amount of registered external memory. Used to give V8 an
- * indication of the amount of externally allocated memory that is kept alive
- * by JavaScript objects. V8 uses this to decide when to perform global
- * garbage collections. Registering externally allocated memory will trigger
- * global garbage collections more often than it would otherwise in an attempt
- * to garbage collect the JavaScript objects that keep the externally
- * allocated memory alive.
- *
- * \param change_in_bytes the change in externally allocated memory that is
- * kept alive by JavaScript objects.
- * \returns the adjusted value.
- */
- int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
-
- /**
- * Returns the number of phantom handles without callbacks that were reset
- * by the garbage collector since the last call to this function.
- */
- size_t NumberOfPhantomHandleResetsSinceLastCall();
-
- /**
- * Returns heap profiler for this isolate. Will return NULL until the isolate
- * is initialized.
- */
- HeapProfiler* GetHeapProfiler();
-
- /**
- * Tells the VM whether the embedder is idle or not.
- */
- void SetIdle(bool is_idle);
-
- /** Returns the ArrayBuffer::Allocator used in this isolate. */
- ArrayBuffer::Allocator* GetArrayBufferAllocator();
-
- /** Returns true if this isolate has a current context. */
- bool InContext();
-
- /**
- * Returns the context of the currently running JavaScript, or the context
- * on the top of the stack if no JavaScript is running.
- */
- Local<Context> GetCurrentContext();
-
- /**
- * Returns either the last context entered through V8's C++ API, or the
- * context of the currently running microtask while processing microtasks.
- * If a context is entered while executing a microtask, that context is
- * returned.
- */
- Local<Context> GetEnteredOrMicrotaskContext();
-
- /**
- * Returns the Context that corresponds to the Incumbent realm in HTML spec.
- * https://html.spec.whatwg.org/multipage/webappapis.html#incumbent
- */
- Local<Context> GetIncumbentContext();
-
- /**
- * Schedules a v8::Exception::Error with the given message.
- * See ThrowException for more details. Templatized to provide compile-time
- * errors in case of too long strings (see v8::String::NewFromUtf8Literal).
- */
- template <int N>
- Local<Value> ThrowError(const char (&message)[N]) {
- return ThrowError(String::NewFromUtf8Literal(this, message));
- }
- Local<Value> ThrowError(Local<String> message);
-
- /**
- * Schedules an exception to be thrown when returning to JavaScript. When an
- * exception has been scheduled it is illegal to invoke any JavaScript
- * operation; the caller must return immediately and only after the exception
- * has been handled does it become legal to invoke JavaScript operations.
- */
- Local<Value> ThrowException(Local<Value> exception);
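-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of the templated ThrowError overload above; the literal's length is
- // checked at compile time via String::NewFromUtf8Literal.
- void FailIfDetached(Isolate* isolate, bool detached) {
-   if (detached) {
-     isolate->ThrowError("ArrayBuffer was detached");
-     return;  // No JavaScript may be invoked until the exception is handled.
-   }
- }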
-
- using GCCallback = void (*)(Isolate* isolate, GCType type,
- GCCallbackFlags flags);
- using GCCallbackWithData = void (*)(Isolate* isolate, GCType type,
- GCCallbackFlags flags, void* data);
-
- /**
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are allowed in the callback function,
- * but the callback is not re-entrant: if an allocation inside it
- * triggers a garbage collection, the callback won't be called again.
- * It is possible to specify the GCType filter for your callback. But it is
- * not possible to register the same callback function two times with
- * different GCType filters.
- */
- void AddGCPrologueCallback(GCCallbackWithData callback, void* data = nullptr,
- GCType gc_type_filter = kGCTypeAll);
- void AddGCPrologueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll);
-
- /**
- * This function removes callback which was installed by
- * AddGCPrologueCallback function.
- */
- void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr);
- void RemoveGCPrologueCallback(GCCallback callback);
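-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of a GC prologue callback matching the GCCallbackWithData signature above;
- // the counter is illustrative embedder state.
- void CountGCs(Isolate* isolate, GCType type, GCCallbackFlags flags,
-               void* data) {
-   ++*static_cast<int*>(data);
- }
-
- void InstallGCCounter(Isolate* isolate, int* gc_count) {
-   isolate->AddGCPrologueCallback(CountGCs, gc_count, kGCTypeAll);
- }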
-
- /**
- * Sets the embedder heap tracer for the isolate.
- */
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- /*
- * Gets the currently active heap tracer for the isolate.
- */
- EmbedderHeapTracer* GetEmbedderHeapTracer();
-
- /**
- * Sets an embedder roots handler that V8 should consider when performing
- * non-unified heap garbage collections.
- *
- * Using only EmbedderHeapTracer automatically sets up a default handler.
- * The intended use case is for setting a custom handler after invoking
- * `AttachCppHeap()`.
- *
- * V8 does not take ownership of the handler.
- */
- void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
-
- /**
- * Attaches a managed C++ heap as an extension to the JavaScript heap. The
- * embedder maintains ownership of the CppHeap. At most one C++ heap can be
- * attached to V8.
- *
- * This is an experimental feature and may still change significantly.
- */
- void AttachCppHeap(CppHeap*);
-
- /**
- * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
- *
- * This is an experimental feature and may still change significantly.
- */
- void DetachCppHeap();
-
- /**
- * This is an experimental feature and may still change significantly.
- *
- * \returns the C++ heap managed by V8. Only available if such a heap has been
- * attached using `AttachCppHeap()`.
- */
- CppHeap* GetCppHeap() const;
-
- /**
- * Use for |AtomicsWaitCallback| to indicate the type of event it receives.
- */
- enum class AtomicsWaitEvent {
- /** Indicates that this call is happening before waiting. */
- kStartWait,
- /** `Atomics.wait()` finished because of an `Atomics.wake()` call. */
- kWokenUp,
- /** `Atomics.wait()` finished because it timed out. */
- kTimedOut,
- /** `Atomics.wait()` was interrupted through |TerminateExecution()|. */
- kTerminatedExecution,
- /** `Atomics.wait()` was stopped through |AtomicsWaitWakeHandle|. */
- kAPIStopped,
- /** `Atomics.wait()` did not wait, as the initial condition was not met. */
- kNotEqual
- };
-
- /**
- * Passed to |AtomicsWaitCallback| as a means of stopping an ongoing
- * `Atomics.wait` call.
- */
- class V8_EXPORT AtomicsWaitWakeHandle {
- public:
- /**
- * Stop this `Atomics.wait()` call and call the |AtomicsWaitCallback|
- * with |kAPIStopped|.
- *
- * This function may be called from another thread. The caller has to ensure
- * through proper synchronization that it is not called after
- * the finishing |AtomicsWaitCallback|.
- *
- * Note that the ECMAScript specification does not plan for the possibility
- * of wakeups that come neither from a timeout nor from an `Atomics.wake()`
- * call, so this may invalidate assumptions made by existing code.
- * The embedder may accordingly wish to schedule an exception in the
- * finishing |AtomicsWaitCallback|.
- */
- void Wake();
- };
-
- /**
- * Embedder callback for `Atomics.wait()` that can be added through
- * |SetAtomicsWaitCallback|.
- *
- * This will be called just before starting to wait with the |event| value
- * |kStartWait| and after finishing waiting with one of the other
- * values of |AtomicsWaitEvent| inside of an `Atomics.wait()` call.
- *
- * |array_buffer| will refer to the underlying SharedArrayBuffer,
- * |offset_in_bytes| to the location of the waited-on memory address inside
- * the SharedArrayBuffer.
- *
- * |value| and |timeout_in_ms| will be the values passed to
- * the `Atomics.wait()` call. If no timeout was used, |timeout_in_ms|
- * will be `INFINITY`.
- *
- * In the |kStartWait| callback, |stop_handle| will be an object that
- * is only valid until the corresponding finishing callback and that
- * can be used to stop the wait process while it is happening.
- *
- * This callback may schedule exceptions, *unless* |event| is equal to
- * |kTerminatedExecution|.
- */
- using AtomicsWaitCallback = void (*)(AtomicsWaitEvent event,
- Local<SharedArrayBuffer> array_buffer,
- size_t offset_in_bytes, int64_t value,
- double timeout_in_ms,
- AtomicsWaitWakeHandle* stop_handle,
- void* data);
-
- /**
- * Set a new |AtomicsWaitCallback|. This overrides an earlier
- * |AtomicsWaitCallback|, if there was any. If |callback| is nullptr,
- * this unsets the callback. |data| will be passed to the callback
- * as its last parameter.
- */
- void SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data);
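-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of an AtomicsWaitCallback matching the signature above; it only reacts to
- // the start event and leaves the other cases untouched.
- void OnAtomicsWait(Isolate::AtomicsWaitEvent event,
-                    Local<SharedArrayBuffer> array_buffer,
-                    size_t offset_in_bytes, int64_t value,
-                    double timeout_in_ms,
-                    Isolate::AtomicsWaitWakeHandle* stop_handle, void* data) {
-   if (event == Isolate::AtomicsWaitEvent::kStartWait) {
-     // |stop_handle| stays valid until the matching finishing callback and
-     // could be stashed here so another thread can call Wake().
-   }
- }
- // Installed via isolate->SetAtomicsWaitCallback(OnAtomicsWait, nullptr);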
-
- /**
- * Enables the host application to receive a notification after a
- * garbage collection. Allocations are allowed in the callback function,
- * but the callback is not re-entrant: if an allocation inside it
- * triggers a garbage collection, the callback won't be called again.
- * It is possible to specify the GCType filter for your callback. But it is
- * not possible to register the same callback function two times with
- * different GCType filters.
- */
- void AddGCEpilogueCallback(GCCallbackWithData callback, void* data = nullptr,
- GCType gc_type_filter = kGCTypeAll);
- void AddGCEpilogueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll);
-
- /**
- * This function removes callback which was installed by
- * AddGCEpilogueCallback function.
- */
- void RemoveGCEpilogueCallback(GCCallbackWithData callback,
- void* data = nullptr);
- void RemoveGCEpilogueCallback(GCCallback callback);
-
- using GetExternallyAllocatedMemoryInBytesCallback = size_t (*)();
-
- /**
- * Set the callback that tells V8 how much memory is currently allocated
- * externally of the V8 heap. Ideally this memory is somehow connected to V8
- * objects and may get freed-up when the corresponding V8 objects get
- * collected by a V8 garbage collection.
- */
- void SetGetExternallyAllocatedMemoryInBytesCallback(
- GetExternallyAllocatedMemoryInBytesCallback callback);
-
- /**
- * Forcefully terminate the current thread of JavaScript execution
- * in the given isolate.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- */
- void TerminateExecution();
-
- /**
- * Is V8 terminating JavaScript execution.
- *
- * Returns true if JavaScript execution is currently terminating
- * because of a call to TerminateExecution. In that case there are
- * still JavaScript frames on the stack and the termination
- * exception is still active.
- */
- bool IsExecutionTerminating();
-
- /**
- * Resume execution capability in the given isolate, whose execution
- * was previously forcefully terminated using TerminateExecution().
- *
- * When execution is forcefully terminated using TerminateExecution(),
- * the isolate can not resume execution until all JavaScript frames
- * have propagated the uncatchable exception which is generated. This
- * method allows the program embedding the engine to handle the
- * termination event and resume execution capability, even if
- * JavaScript frames remain on the stack.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- */
- void CancelTerminateExecution();
-
- /**
- * Request V8 to interrupt long running JavaScript code and invoke
- * the given |callback|, passing the given |data| to it. After |callback|
- * returns, control will be returned to the JavaScript code.
- * There may be a number of interrupt requests in flight.
- * Can be called from another thread without acquiring a |Locker|.
- * Registered |callback| must not reenter interrupted Isolate.
- */
- void RequestInterrupt(InterruptCallback callback, void* data);
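-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of interrupting long-running script from another thread; it assumes the
- // InterruptCallback signature void(Isolate*, void* data) declared in
- // v8-callbacks.h.
- void OnInterrupt(Isolate* isolate, void* data) {
-   if (*static_cast<bool*>(data)) isolate->TerminateExecution();
- }
-
- void RequestStop(Isolate* isolate, bool* should_stop) {
-   // Safe to call without holding a Locker.
-   isolate->RequestInterrupt(OnInterrupt, should_stop);
- }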
-
- /**
- * Returns true if there is ongoing background work within V8 that will
- * eventually post a foreground task, like asynchronous WebAssembly
- * compilation.
- */
- bool HasPendingBackgroundTasks();
-
- /**
- * Request garbage collection in this Isolate. It is only valid to call this
- * function if --expose_gc was specified.
- *
- * This should only be used for testing purposes and not to enforce a garbage
- * collection schedule. It has strong negative impact on the garbage
- * collection performance. Use IdleNotificationDeadline() or
- * LowMemoryNotification() instead to influence the garbage collection
- * schedule.
- */
- void RequestGarbageCollectionForTesting(GarbageCollectionType type);
-
- /**
- * Set the callback to invoke for logging event.
- */
- void SetEventLogger(LogEventCallback that);
-
- /**
- * Adds a callback to notify the host application right before a script
- * is about to run. If a script re-enters the runtime during execution, the
- * BeforeCallEnteredCallback is invoked for each re-entrance.
- * Executing scripts inside the callback will re-trigger the callback.
- */
- void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
-
- /**
- * Removes callback that was installed by AddBeforeCallEnteredCallback.
- */
- void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
-
- /**
- * Adds a callback to notify the host application when a script finished
- * running. If a script re-enters the runtime during execution, the
- * CallCompletedCallback is only invoked when the outer-most script
- * execution ends. Executing scripts inside the callback do not trigger
- * further callbacks.
- */
- void AddCallCompletedCallback(CallCompletedCallback callback);
-
- /**
- * Removes callback that was installed by AddCallCompletedCallback.
- */
- void RemoveCallCompletedCallback(CallCompletedCallback callback);
-
- /**
- * Set the PromiseHook callback for various promise lifecycle
- * events.
- */
- void SetPromiseHook(PromiseHook hook);
-
- /**
- * Set callback to notify about promise reject with no handler, or
- * revocation of such a previous notification once the handler is added.
- */
- void SetPromiseRejectCallback(PromiseRejectCallback callback);
-
- /**
- * Runs the default MicrotaskQueue until it gets empty and performs other
- * microtask checkpoint steps, such as calling ClearKeptObjects. Asserts that
- * the MicrotasksPolicy is not kScoped. Any exceptions thrown by microtask
- * callbacks are swallowed.
- */
- void PerformMicrotaskCheckpoint();
-
- /**
- * Enqueues the callback to the default MicrotaskQueue
- */
- void EnqueueMicrotask(Local<Function> microtask);
-
- /**
- * Enqueues the callback to the default MicrotaskQueue
- */
- void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
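-
- // A hedged usage sketch (embedder-side code, shown inline for illustration)
- // of enqueueing a native microtask; it assumes the MicrotaskCallback
- // signature void(void* data) declared in v8-microtask.h.
- void OnMicrotask(void* data) { ++*static_cast<int*>(data); }
-
- void ScheduleMicrotask(Isolate* isolate, int* counter) {
-   isolate->EnqueueMicrotask(OnMicrotask, counter);
-   // With MicrotasksPolicy::kExplicit the embedder later drains the queue via
-   // isolate->PerformMicrotaskCheckpoint().
- }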
-
- /**
- * Controls how Microtasks are invoked. See MicrotasksPolicy for details.
- */
- void SetMicrotasksPolicy(MicrotasksPolicy policy);
-
- /**
- * Returns the policy controlling how Microtasks are invoked.
- */
- MicrotasksPolicy GetMicrotasksPolicy() const;
-
- /**
- * Adds a callback to notify the host application after
- * microtasks were run on the default MicrotaskQueue. The callback is
- * triggered by an explicit RunMicrotasks call or automatic microtasks
- * execution (see SetMicrotasksPolicy).
- *
- * The callback will trigger even if microtasks were attempted to run
- * but the microtask queue was empty and no microtask was actually
- * executed.
- *
- * Executing scripts inside the callback will not re-trigger microtasks and
- * the callback.
- */
- void AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
-
- /**
- * Removes callback that was installed by AddMicrotasksCompletedCallback.
- */
- void RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
-
- /**
- * Sets a callback for counting the number of times a feature of V8 is used.
- */
- void SetUseCounterCallback(UseCounterCallback callback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * statistics counters.
- */
- void SetCounterFunction(CounterLookupCallback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * histograms. The CreateHistogram function returns a
- * histogram which will later be passed to the AddHistogramSample
- * function.
- */
- void SetCreateHistogramFunction(CreateHistogramCallback);
- void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * event based metrics. In order to use this interface
- * include/v8-metrics.h
- * needs to be included and the recorder needs to be derived from the
- * Recorder base class defined there.
- * This method can only be called once per isolate and must happen during
- * isolate initialization before background threads are spawned.
- */
- void SetMetricsRecorder(
- const std::shared_ptr<metrics::Recorder>& metrics_recorder);
-
- /**
- * Enables the host application to provide a mechanism for recording a
- * predefined set of data as crash keys to be used in postmortem debugging in
- * case of a crash.
- */
- void SetAddCrashKeyCallback(AddCrashKeyCallback);
-
- /**
- * Optional notification that the embedder is idle.
- * V8 uses the notification to perform garbage collection.
- * This call can be used repeatedly if the embedder remains idle.
- * Returns true if the embedder should stop calling IdleNotificationDeadline
- * until real work has been done. This indicates that V8 has done
- * as much cleanup as it will be able to do.
- *
- * The deadline_in_seconds argument specifies the deadline V8 has to finish
- * garbage collection work. deadline_in_seconds is compared with
- * MonotonicallyIncreasingTime() and should be based on the same timebase as
- * that function. There is no guarantee that the actual work will be done
- * within the time limit.
- */
- bool IdleNotificationDeadline(double deadline_in_seconds);
-
- /**
- * Optional notification that the system is running low on memory.
- * V8 uses these notifications to attempt to free memory.
- */
- void LowMemoryNotification();
-
- /**
- * Optional notification that a context has been disposed. V8 uses these
- * notifications to guide the GC heuristic and cancel FinalizationRegistry
- * cleanup tasks. Returns the number of context disposals - including this one
- * - since the last time V8 had a chance to clean up.
- *
- * The optional parameter |dependant_context| specifies whether the disposed
- * context was depending on state from other contexts or not.
- */
- int ContextDisposedNotification(bool dependant_context = true);
-
- /**
- * Optional notification that the isolate switched to the foreground.
- * V8 uses these notifications to guide heuristics.
- */
- void IsolateInForegroundNotification();
-
- /**
- * Optional notification that the isolate switched to the background.
- * V8 uses these notifications to guide heuristics.
- */
- void IsolateInBackgroundNotification();
-
- /**
- * Optional notification which will enable the memory savings mode.
- * V8 uses this notification to guide heuristics which may result in a
- * smaller memory footprint at the cost of reduced runtime performance.
- */
- void EnableMemorySavingsMode();
-
- /**
- * Optional notification which will disable the memory savings mode.
- */
- void DisableMemorySavingsMode();
-
- /**
- * Optional notification to tell V8 the current performance requirements
- * of the embedder based on RAIL.
- * V8 uses these notifications to guide heuristics.
- * This is an unfinished experimental feature. Semantics and implementation
- * may change frequently.
- */
- void SetRAILMode(RAILMode rail_mode);
-
- /**
- * Update load start time of the RAIL mode
- */
- void UpdateLoadStartTime();
-
- /**
- * Optional notification to tell V8 the current isolate is used for debugging
- * and requires higher heap limit.
- */
- void IncreaseHeapLimitForDebugging();
-
- /**
- * Restores the original heap limit after IncreaseHeapLimitForDebugging().
- */
- void RestoreOriginalHeapLimit();
-
- /**
- * Returns true if the heap limit was increased for debugging and the
- * original heap limit was not restored yet.
- */
- bool IsHeapLimitIncreasedForDebugging();
-
- /**
- * Allows the host application to provide the address of a function that is
- * notified each time code is added, moved or removed.
- *
- * \param options options for the JIT code event handler.
- * \param event_handler the JIT code event handler, which will be invoked
- * each time code is added, moved or removed.
- * \note \p event_handler won't get notified of already-existing code.
- * \note since code removal notifications are not currently issued, the
- * \p event_handler may get notifications of code that overlaps earlier
- * code notifications. This happens when code areas are reused, and the
- * earlier overlapping code areas should therefore be discarded.
- * \note the events passed to \p event_handler and the strings they point to
- * are not guaranteed to live past each call. The \p event_handler must
- * copy strings and other parameters it needs to keep around.
- * \note the set of events declared in JitCodeEvent::EventType is expected to
- * grow over time, and the JitCodeEvent structure is expected to accrue
- * new members. The \p event_handler function must ignore event codes
- * it does not recognize to maintain future compatibility.
- * \note Use Isolate::CreateParams to get events for code executed during
- * Isolate setup.
- */
- void SetJitCodeEventHandler(JitCodeEventOptions options,
- JitCodeEventHandler event_handler);
-
- /**
- * Modifies the stack limit for this Isolate.
- *
- * \param stack_limit An address beyond which the VM's stack may not grow.
- *
- * \note If you are using threads then you should hold the V8::Locker lock
- * while setting the stack limit and you must set a non-default stack
- * limit separately for each thread.
- */
- void SetStackLimit(uintptr_t stack_limit);
-
- /**
- * Returns a memory range that can potentially contain jitted code. Code for
- * V8's 'builtins' will not be in this range if embedded builtins is enabled.
- *
- * On Win64, embedders are advised to install function table callbacks for
- * these ranges, as default SEH won't be able to unwind through jitted code.
- * The first page of the code range is reserved for the embedder and is
- * committed, writable, and executable, to be used to store unwind data, as
- * documented in
- * https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
- *
- * Might be empty on other platforms.
- *
- * https://code.google.com/p/v8/issues/detail?id=3598
- */
- void GetCodeRange(void** start, size_t* length_in_bytes);
-
- /**
- * As GetCodeRange, but for embedded builtins (these live in a distinct
- * memory region from other V8 Code objects).
- */
- void GetEmbeddedCodeRange(const void** start, size_t* length_in_bytes);
-
- /**
- * Returns the JSEntryStubs necessary for use with the Unwinder API.
- */
- JSEntryStubs GetJSEntryStubs();
-
- static constexpr size_t kMinCodePagesBufferSize = 32;
-
- /**
- * Copies the code heap pages currently in use by V8 into |code_pages_out|.
- * |code_pages_out| must have at least kMinCodePagesBufferSize capacity and
- * must be empty.
- *
- * Signal-safe, does not allocate, does not access the V8 heap.
- * No code on the stack can rely on pages that might be missing.
- *
- * Returns the number of pages available to be copied, which might be greater
- * than |capacity|. In this case, only |capacity| pages will be copied into
- * |code_pages_out|. The caller should provide a bigger buffer on the next
- * call in order to get all available code pages, but this is not required.
- */
- size_t CopyCodePages(size_t capacity, MemoryRange* code_pages_out);
-
- /** Set the callback to invoke in case of fatal errors. */
- void SetFatalErrorHandler(FatalErrorCallback that);
-
- /** Set the callback to invoke in case of OOM errors. */
- void SetOOMErrorHandler(OOMErrorCallback that);
-
- /**
- * Add a callback to invoke in case the heap size is close to the heap limit.
- * If multiple callbacks are added, only the most recently added callback is
- * invoked.
- */
- void AddNearHeapLimitCallback(NearHeapLimitCallback callback, void* data);
-
- /**
- * Remove the given callback and restore the heap limit to the
- * given limit. If the given limit is zero, then it is ignored.
- * If the current heap size is greater than the given limit,
- * then the heap limit is restored to the minimal limit that
- * is possible for the current heap size.
- */
- void RemoveNearHeapLimitCallback(NearHeapLimitCallback callback,
- size_t heap_limit);
-
- /**
- * If the heap limit was changed by the NearHeapLimitCallback, then the
- * initial heap limit will be restored once the heap size falls below the
- * given threshold percentage of the initial heap limit.
- * The threshold percentage is a number in (0.0, 1.0) range.
- */
- void AutomaticallyRestoreInitialHeapLimit(double threshold_percent = 0.5);
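-
- /**
- * An illustrative sketch of a near-heap-limit callback that raises the limit
- * by 25% to buy time, combined with automatic restoration once usage drops
- * below half of the initial limit (hypothetical embedder code):
- * \code
- * size_t OnNearHeapLimit(void* data, size_t current_limit,
- * size_t initial_limit) {
- * // Returning a value larger than |current_limit| raises the heap limit.
- * return current_limit + current_limit / 4;
- * }
- *
- * isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
- * isolate->AutomaticallyRestoreInitialHeapLimit(0.5);
- * \endcode
- */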
-
- /**
- * Set the callback to invoke to check if code generation from
- * strings should be allowed.
- */
- void SetModifyCodeGenerationFromStringsCallback(
- ModifyCodeGenerationFromStringsCallback2 callback);
-
- /**
- * Set the callback to invoke to check if wasm code generation should
- * be allowed.
- */
- void SetAllowWasmCodeGenerationCallback(
- AllowWasmCodeGenerationCallback callback);
-
- /**
- * Embedder over{ride|load} injection points for wasm APIs. The expectation
- * is that the embedder sets them at most once.
- */
- void SetWasmModuleCallback(ExtensionCallback callback);
- void SetWasmInstanceCallback(ExtensionCallback callback);
-
- void SetWasmStreamingCallback(WasmStreamingCallback callback);
-
- void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
-
- void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
-
- void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
-
- void SetSharedArrayBufferConstructorEnabledCallback(
- SharedArrayBufferConstructorEnabledCallback callback);
-
- /**
- * This function can be called by the embedder to signal V8 that the dynamic
- * enabling of features has finished. V8 can now set up dynamically added
- * features.
- */
- void InstallConditionalFeatures(Local<Context> context);
-
- /**
- * Check if V8 is dead and therefore unusable. This is the case after
- * fatal errors such as out-of-memory situations.
- */
- bool IsDead();
-
- /**
- * Adds a message listener (errors only).
- *
- * The same message listener can be added more than once and in that
- * case it will be called more than once for each message.
- *
- * If data is specified, it will be passed to the callback when it is called.
- * Otherwise, the exception object will be passed to the callback instead.
- */
- bool AddMessageListener(MessageCallback that,
- Local<Value> data = Local<Value>());
-
- /**
- * Adds a message listener.
- *
- * The same message listener can be added more than once and in that
- * case it will be called more than once for each message.
- *
- * If data is specified, it will be passed to the callback when it is called.
- * Otherwise, the exception object will be passed to the callback instead.
- *
- * A listener can listen for particular error levels by providing a mask.
- */
- bool AddMessageListenerWithErrorLevel(MessageCallback that,
- int message_levels,
- Local<Value> data = Local<Value>());
-
- /**
- * Remove all message listeners from the specified callback function.
- */
- void RemoveMessageListeners(MessageCallback that);
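-
- /**
- * An illustrative sketch of registering and later removing an error-only
- * message listener (hypothetical embedder code; assumes <cstdio> is
- * available):
- * \code
- * void OnMessage(v8::Local<v8::Message> message, v8::Local<v8::Value> data) {
- * v8::Isolate* isolate = message->GetIsolate();
- * v8::String::Utf8Value text(isolate, message->Get());
- * fprintf(stderr, "V8: %s\n", *text ? *text : "<string conversion failed>");
- * }
- *
- * isolate->AddMessageListener(OnMessage);
- * // ...
- * isolate->RemoveMessageListeners(OnMessage);
- * \endcode
- */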
-
- /** Callback function for reporting failed access checks.*/
- void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
-
- /**
- * Tells V8 to capture the current stack trace when an uncaught exception
- * occurs and to report it to the message listeners. The option is off by
- * default.
- */
- void SetCaptureStackTraceForUncaughtExceptions(
- bool capture, int frame_limit = 10,
- StackTrace::StackTraceOptions options = StackTrace::kOverview);
-
- /**
- * Iterates through all external resources referenced from the current
- * isolate's heap. GC is not invoked prior to iterating, so there is no
- * guarantee that visited objects are still alive.
- */
- void VisitExternalResources(ExternalResourceVisitor* visitor);
-
- /**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids.
- */
- void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
-
- /**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids and are weak, so that they can be marked as inactive
- * if there is no pending activity for the handle.
- */
- void VisitWeakHandles(PersistentHandleVisitor* visitor);
-
- /**
- * Check if this isolate is in use.
- * True if at least one thread Enter'ed this isolate.
- */
- bool IsInUse();
-
- /**
- * Set whether calling Atomics.wait (a function that may block) is allowed in
- * this isolate. This can also be configured via
- * CreateParams::allow_atomics_wait.
- */
- void SetAllowAtomicsWait(bool allow);
-
- /**
- * Time zone redetection indicator for
- * DateTimeConfigurationChangeNotification.
- *
- * kSkip indicates to V8 that the notification should not trigger redetecting
- * the host time zone. kRedetect indicates to V8 that the host time zone
- * should be redetected and used to set the default time zone.
- *
- * Host time zone detection may require file system access or similar
- * operations unlikely to be available inside a sandbox. If V8 is run inside a
- * sandbox, the host time zone has to be detected outside the sandbox before
- * calling the DateTimeConfigurationChangeNotification function.
- */
- enum class TimeZoneDetection { kSkip, kRedetect };
-
- /**
- * Notification that the embedder has changed the time zone, daylight saving
- * time, or other date / time configuration parameters. V8 keeps a cache of
- * various values used for date / time computation. This notification will
- * reset those cached values for the current context so that date / time
- * configuration changes are reflected.
- *
- * This API should not be called more than needed as it will negatively impact
- * the performance of date operations.
- */
- void DateTimeConfigurationChangeNotification(
- TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip);
-
- /**
- * Notification that the embedder has changed the locale. V8 keeps a cache of
- * various values used for locale computation. This notification will reset
- * those cached values for the current context so that locale configuration
- * changes are reflected.
- *
- * This API should not be called more than needed as it will negatively impact
- * the performance of locale operations.
- */
- void LocaleConfigurationChangeNotification();
-
- Isolate() = delete;
- ~Isolate() = delete;
- Isolate(const Isolate&) = delete;
- Isolate& operator=(const Isolate&) = delete;
- // Deleting operator new and delete here is allowed as the ctor and dtor are
- // also deleted.
- void* operator new(size_t size) = delete;
- void* operator new[](size_t size) = delete;
- void operator delete(void*, size_t) = delete;
- void operator delete[](void*, size_t) = delete;
-
- private:
- template <class K, class V, class Traits>
- friend class PersistentValueMapBase;
-
- internal::Address* GetDataFromSnapshotOnce(size_t index);
- void ReportExternalAllocationLimitReached();
-};
-
-class V8_EXPORT StartupData {
- public:
- /**
- * Whether the data created can be rehashed and the hash seed can be
- * recomputed when deserialized.
- * Only valid for StartupData returned by SnapshotCreator::CreateBlob().
- */
- bool CanBeRehashed() const;
- /**
- * Allows embedders to verify whether the data is valid for the current
- * V8 instance.
- */
- bool IsValid() const;
-
- const char* data;
- int raw_size;
-};
-
-/**
- * EntropySource is used as a callback function when v8 needs a source
- * of entropy.
- */
-using EntropySource = bool (*)(unsigned char* buffer, size_t length);
-
-/**
- * ReturnAddressLocationResolver is used as a callback function when v8 is
- * resolving the location of a return address on the stack. Profilers that
- * change the return address on the stack can use this to resolve the stack
- * location to wherever the profiler stashed the original return address.
- *
- * \param return_addr_location A location on stack where a machine
- * return address resides.
- * \returns Either return_addr_location, or else a pointer to the profiler's
- * copy of the original return address.
- *
- * \note The resolver function must not cause garbage collection.
- */
-using ReturnAddressLocationResolver =
- uintptr_t (*)(uintptr_t return_addr_location);
-
-/**
- * Container class for static utility functions.
- */
-class V8_EXPORT V8 {
- public:
- /**
- * Hand startup data to V8, in case the embedder has chosen to build
- * V8 with external startup data.
- *
- * Note:
- * - By default the startup data is linked into the V8 library, in which
- * case this function is not meaningful.
- * - If this needs to be called, it needs to be called before V8
- * tries to make use of its built-ins.
- * - To avoid unnecessary copies of data, V8 will point directly into the
- * given data blob, so pretty please keep it around until V8 exits.
- * - Compression of the startup blob might be useful, but needs to be
- * handled entirely on the embedders' side.
- * - The call will abort if the data is invalid.
- */
- static void SetSnapshotDataBlob(StartupData* startup_blob);
-
- /** Set the callback to invoke in case of Dcheck failures. */
- static void SetDcheckErrorHandler(DcheckErrorCallback that);
-
-
- /**
- * Sets V8 flags from a string.
- */
- static void SetFlagsFromString(const char* str);
- static void SetFlagsFromString(const char* str, size_t length);
-
- /**
- * Sets V8 flags from the command line.
- */
- static void SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags);
-
- /** Get the version string. */
- static const char* GetVersion();
-
- /**
- * Initializes V8. This function needs to be called before the first Isolate
- * is created. It always returns true.
- */
- V8_INLINE static bool Initialize() {
- const int kBuildConfiguration =
- (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
- (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
- (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0);
- return Initialize(kBuildConfiguration);
- }
-
- /**
- * Allows the host application to provide a callback which can be used
- * as a source of entropy for random number generators.
- */
- static void SetEntropySource(EntropySource source);
-
- /**
- * Allows the host application to provide a callback that allows v8 to
- * cooperate with a profiler that rewrites return addresses on stack.
- */
- static void SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver return_address_resolver);
-
- /**
- * Releases any resources used by V8 and stops any utility threads
- * that may be running. Note that disposing V8 is permanent; it
- * cannot be reinitialized.
- *
- * It should generally not be necessary to dispose of V8 before exiting
- * a process; this should happen automatically. It is only necessary
- * if the process needs the resources taken up by V8.
- */
- static bool Dispose();
-
- /**
- * Initialize the ICU library bundled with V8. The embedder should only
- * invoke this method when using the bundled ICU. Returns true on success.
- *
- * If V8 was compiled with the ICU data in an external file, the location
- * of the data file has to be provided.
- */
- static bool InitializeICU(const char* icu_data_file = nullptr);
-
- /**
- * Initialize the ICU library bundled with V8. The embedder should only
- * invoke this method when using the bundled ICU. If V8 was compiled with
- * the ICU data in an external file and when the default location of that
- * file should be used, a path to the executable must be provided.
- * Returns true on success.
- *
- * The default is a file called icudtl.dat side-by-side with the executable.
- *
- * Optionally, the location of the data file can be provided to override the
- * default.
- */
- static bool InitializeICUDefaultLocation(const char* exec_path,
- const char* icu_data_file = nullptr);
-
- /**
- * Initialize the external startup data. The embedder only needs to
- * invoke this method when external startup data was enabled in a build.
- *
- * If V8 was compiled with the startup data in an external file, then
- * V8 needs to be given those external files during startup. There are
- * three ways to do this:
- * - InitializeExternalStartupData(const char*)
- * This will look in the given directory for the file "snapshot_blob.bin".
- * - InitializeExternalStartupDataFromFile(const char*)
- * As above, but will directly use the given file name.
- * - Call SetSnapshotDataBlob.
- * This will read the blobs from the given data structure and will
- * not perform any file IO.
- */
- static void InitializeExternalStartupData(const char* directory_path);
- static void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
-
- /**
- * Sets the v8::Platform to use. This should be invoked before V8 is
- * initialized.
- */
- static void InitializePlatform(Platform* platform);
-
- /**
- * Clears all references to the v8::Platform. This should be invoked after
- * V8 was disposed.
- */
- static void ShutdownPlatform();
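-
- /**
- * An illustrative sketch of the typical embedder start-up and shutdown
- * sequence using the functions above (hypothetical embedder code; assumes it
- * runs in main() with argv available, that "libplatform/libplatform.h" is
- * included, and omits error handling):
- * \code
- * std::unique_ptr<v8::Platform> platform =
- * v8::platform::NewDefaultPlatform();
- * v8::V8::InitializePlatform(platform.get());
- * v8::V8::InitializeICUDefaultLocation(argv[0]);
- * v8::V8::InitializeExternalStartupData(argv[0]);
- * v8::V8::Initialize();
- *
- * // ... create Isolates, run scripts ...
- *
- * v8::V8::Dispose();
- * v8::V8::ShutdownPlatform();
- * \endcode
- */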
-
- /**
- * Activate trap-based bounds checking for WebAssembly.
- *
- * \param use_v8_signal_handler Whether V8 should install its own signal
- * handler or rely on the embedder's.
- */
- static bool EnableWebAssemblyTrapHandler(bool use_v8_signal_handler);
-
-#if defined(V8_OS_WIN)
- /**
- * On Win64, by default V8 does not emit unwinding data for jitted code,
- * which means the OS cannot walk the stack frames and the system Structured
- * Exception Handling (SEH) cannot unwind through V8-generated code:
- * https://code.google.com/p/v8/issues/detail?id=3598.
- *
- * This function allows embedders to register a custom exception handler for
- * exceptions in V8-generated code.
- */
- static void SetUnhandledExceptionCallback(
- UnhandledExceptionCallback unhandled_exception_callback);
-#endif
-
- /**
- * Get statistics about the shared memory usage.
- */
- static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
-
- private:
- V8();
-
- enum BuildConfigurationFeatures {
- kPointerCompression = 1 << 0,
- k31BitSmis = 1 << 1,
- kHeapSandbox = 1 << 2,
- };
-
- /**
- * Checks that the embedder build configuration is compatible with
- * the V8 binary and if so initializes V8.
- */
- static bool Initialize(int build_config);
-
- static internal::Address* GlobalizeReference(internal::Isolate* isolate,
- internal::Address* handle);
- static internal::Address* GlobalizeTracedReference(internal::Isolate* isolate,
- internal::Address* handle,
- internal::Address* slot,
- bool has_destructor);
- static void MoveGlobalReference(internal::Address** from,
- internal::Address** to);
- static void MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to);
- static void CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to);
- static internal::Address* CopyGlobalReference(internal::Address* from);
- static void DisposeGlobal(internal::Address* global_handle);
- static void DisposeTracedGlobal(internal::Address* global_handle);
- static void MakeWeak(internal::Address* location, void* data,
- WeakCallbackInfo<void>::Callback weak_callback,
- WeakCallbackType type);
- static void MakeWeak(internal::Address** location_addr);
- static void* ClearWeak(internal::Address* location);
- static void SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback);
- static void AnnotateStrongRetainer(internal::Address* location,
- const char* label);
- static Value* Eternalize(Isolate* isolate, Value* handle);
-
- template <class K, class V, class T>
- friend class PersistentValueMapBase;
-
- static void FromJustIsNothing();
- static void ToLocalEmpty();
- static void InternalFieldOutOfBounds(int index);
- template <class T>
- friend class BasicTracedReference;
- template <class T>
- friend class Global;
- template <class T> friend class Local;
- template <class T>
- friend class MaybeLocal;
- template <class T>
- friend class Maybe;
- template <class T>
- friend class TracedGlobal;
- friend class TracedReferenceBase;
- template <class T>
- friend class TracedReference;
- template <class T>
- friend class WeakCallbackInfo;
- template <class T> friend class Eternal;
- template <class T> friend class PersistentBase;
- template <class T, class M> friend class Persistent;
- friend class Context;
-};
-
-/**
- * Helper class to create a snapshot data blob.
- *
- * The Isolate used by a SnapshotCreator is owned by it, and will be entered
- * and exited by the constructor and destructor, respectively; the destructor
- * will also destroy the Isolate. Experimental language features, including
- * those available by default, are not available while creating a snapshot.
- */
-class V8_EXPORT SnapshotCreator {
- public:
- enum class FunctionCodeHandling { kClear, kKeep };
-
- /**
- * Initialize and enter an isolate, and set it up for serialization.
- * The isolate is either created from scratch or from an existing snapshot.
- * The caller keeps ownership of the argument snapshot.
- * \param existing_blob existing snapshot from which to create this one.
- * \param external_references a null-terminated array of external references
- * that must be equivalent to CreateParams::external_references.
- */
- SnapshotCreator(Isolate* isolate,
- const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
-
- /**
- * Create and enter an isolate, and set it up for serialization.
- * The isolate is either created from scratch or from an existing snapshot.
- * The caller keeps ownership of the argument snapshot.
- * \param existing_blob existing snapshot from which to create this one.
- * \param external_references a null-terminated array of external references
- * that must be equivalent to CreateParams::external_references.
- */
- SnapshotCreator(const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
-
- /**
- * Destroy the snapshot creator, and exit and dispose of the Isolate
- * associated with it.
- */
- ~SnapshotCreator();
-
- /**
- * \returns the isolate prepared by the snapshot creator.
- */
- Isolate* GetIsolate();
-
- /**
- * Set the default context to be included in the snapshot blob.
- * The snapshot will not contain the global proxy; either one, or a global
- * object template from which to create one, is expected to be provided upon
- * deserialization.
- *
- * \param callback optional callback to serialize internal fields.
- */
- void SetDefaultContext(Local<Context> context,
- SerializeInternalFieldsCallback callback =
- SerializeInternalFieldsCallback());
-
- /**
- * Add additional context to be included in the snapshot blob.
- * The snapshot will include the global proxy.
- *
- * \param callback optional callback to serialize internal fields.
- *
- * \returns the index of the context in the snapshot blob.
- */
- size_t AddContext(Local<Context> context,
- SerializeInternalFieldsCallback callback =
- SerializeInternalFieldsCallback());
-
- /**
- * Attach arbitrary V8::Data to the context snapshot, which can be retrieved
- * via Context::GetDataFromSnapshot after deserialization. This data does not
- * survive when a new snapshot is created from an existing snapshot.
- * \returns the index for retrieval.
- */
- template <class T>
- V8_INLINE size_t AddData(Local<Context> context, Local<T> object);
-
- /**
- * Attach arbitrary V8::Data to the isolate snapshot, which can be retrieved
- * via Isolate::GetDataFromSnapshot after deserialization. This data does not
- * survive when a new snapshot is created from an existing snapshot.
- * \returns the index for retrieval.
- */
- template <class T>
- V8_INLINE size_t AddData(Local<T> object);
-
- /**
- * Creates a snapshot data blob.
- * This must not be called from within a handle scope.
- * \param function_code_handling whether to include compiled function code
- * in the snapshot.
- * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
- * caller acquires ownership of the data array in the return value.
- */
- StartupData CreateBlob(FunctionCodeHandling function_code_handling);
-
- // Disallow copying and assigning.
- SnapshotCreator(const SnapshotCreator&) = delete;
- void operator=(const SnapshotCreator&) = delete;
-
- private:
- size_t AddData(Local<Context> context, internal::Address object);
- size_t AddData(internal::Address object);
-
- void* data_;
-};
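-
-/**
- * An illustrative sketch of producing a snapshot blob with SnapshotCreator
- * (hypothetical embedder code; error handling is omitted and V8 is assumed
- * to be initialized already):
- * \code
- * v8::SnapshotCreator creator;
- * v8::Isolate* isolate = creator.GetIsolate();
- * {
- * v8::HandleScope handle_scope(isolate);
- * v8::Local<v8::Context> context = v8::Context::New(isolate);
- * // Run any set-up code in |context| here before serializing it.
- * creator.SetDefaultContext(context);
- * }
- * // CreateBlob must not be called from within a handle scope.
- * v8::StartupData blob =
- * creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
- * // The caller owns blob.data and must eventually delete[] it.
- * \endcode
- */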
-
-/**
- * A simple Maybe type, representing an object which may or may not have a
- * value, see https://hackage.haskell.org/package/base/docs/Data-Maybe.html.
- *
- * If an API method returns a Maybe<>, the API method can potentially fail
- * either because an exception is thrown, or because an exception is pending,
- * e.g. because a previous API call threw an exception that hasn't been caught
- * yet, or because a TerminateExecution exception was thrown. In that case, a
- * "Nothing" value is returned.
- */
-template <class T>
-class Maybe {
- public:
- V8_INLINE bool IsNothing() const { return !has_value_; }
- V8_INLINE bool IsJust() const { return has_value_; }
-
- /**
- * An alias for |FromJust|. Will crash if the Maybe<> is nothing.
- */
- V8_INLINE T ToChecked() const { return FromJust(); }
-
- /**
- * A short-hand for ToChecked() that doesn't return a value. To be used where
- * the actual value of the Maybe is not needed, e.g. with Object::Set.
- */
- V8_INLINE void Check() const {
- if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
- }
-
- /**
- * Converts this Maybe<> to a value of type T. If this Maybe<> is
- * nothing (empty), |false| is returned and |out| is left untouched.
- */
- V8_WARN_UNUSED_RESULT V8_INLINE bool To(T* out) const {
- if (V8_LIKELY(IsJust())) *out = value_;
- return IsJust();
- }
-
- /**
- * Converts this Maybe<> to a value of type T. If this Maybe<> is
- * nothing (empty), V8 will crash the process.
- */
- V8_INLINE T FromJust() const {
- if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
- return value_;
- }
-
- /**
- * Converts this Maybe<> to a value of type T, using a default value if this
- * Maybe<> is nothing (empty).
- */
- V8_INLINE T FromMaybe(const T& default_value) const {
- return has_value_ ? value_ : default_value;
- }
-
- V8_INLINE bool operator==(const Maybe& other) const {
- return (IsJust() == other.IsJust()) &&
- (!IsJust() || FromJust() == other.FromJust());
- }
-
- V8_INLINE bool operator!=(const Maybe& other) const {
- return !operator==(other);
- }
-
- private:
- Maybe() : has_value_(false) {}
- explicit Maybe(const T& t) : has_value_(true), value_(t) {}
-
- bool has_value_;
- T value_;
-
- template <class U>
- friend Maybe<U> Nothing();
- template <class U>
- friend Maybe<U> Just(const U& u);
-};
-
-template <class T>
-inline Maybe<T> Nothing() {
- return Maybe<T>();
-}
-
-template <class T>
-inline Maybe<T> Just(const T& t) {
- return Maybe<T>(t);
-}
-
-// A template specialization of Maybe<T> for the case of T = void.
-template <>
-class Maybe<void> {
- public:
- V8_INLINE bool IsNothing() const { return !is_valid_; }
- V8_INLINE bool IsJust() const { return is_valid_; }
-
- V8_INLINE bool operator==(const Maybe& other) const {
- return IsJust() == other.IsJust();
- }
-
- V8_INLINE bool operator!=(const Maybe& other) const {
- return !operator==(other);
- }
-
- private:
- struct JustTag {};
-
- Maybe() : is_valid_(false) {}
- explicit Maybe(JustTag) : is_valid_(true) {}
-
- bool is_valid_;
-
- template <class U>
- friend Maybe<U> Nothing();
- friend Maybe<void> JustVoid();
-};
-
-inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
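-
-/**
- * An illustrative sketch of consuming a Maybe<> returned by an API call
- * (hypothetical embedder code; |context|, |object|, |key| and |value| are
- * assumed to exist):
- * \code
- * v8::Maybe<bool> result = object->Set(context, key, value);
- * if (result.IsNothing()) {
- * // An exception is pending; bail out and let the caller handle it.
- * return;
- * }
- * bool stored = result.FromJust(); // Safe: result is known to be Just.
- * \endcode
- */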
-
-/**
- * An external exception handler.
- */
-class V8_EXPORT TryCatch {
- public:
- /**
- * Creates a new try/catch block and registers it with v8. Note that
- * all TryCatch blocks should be stack allocated because the memory
- * location itself is compared against JavaScript try/catch blocks.
- */
- explicit TryCatch(Isolate* isolate);
-
- /**
- * Unregisters and deletes this try/catch block.
- */
- ~TryCatch();
-
- /**
- * Returns true if an exception has been caught by this try/catch block.
- */
- bool HasCaught() const;
-
- /**
- * For certain types of exceptions, it makes no sense to continue execution.
- *
- * If CanContinue returns false, the correct action is to perform any C++
- * cleanup needed and then return. If CanContinue returns false and
- * HasTerminated returns true, it is possible to call
- * CancelTerminateExecution in order to continue calling into the engine.
- */
- bool CanContinue() const;
-
- /**
- * Returns true if an exception has been caught due to script execution
- * being terminated.
- *
- * There is no JavaScript representation of an execution termination
- * exception. Such exceptions are thrown when the TerminateExecution
- * methods are called to terminate a long-running script.
- *
- * If such an exception has been thrown, HasTerminated will return true,
- * indicating that it is possible to call CancelTerminateExecution in order
- * to continue calling into the engine.
- */
- bool HasTerminated() const;
-
- /**
- * Throws the exception caught by this TryCatch in a way that avoids
- * it being caught again by this same TryCatch. As with ThrowException
- * it is illegal to execute any JavaScript operations after calling
- * ReThrow; the caller must return immediately to where the exception
- * is caught.
- */
- Local<Value> ReThrow();
-
- /**
- * Returns the exception caught by this try/catch block. If no exception has
- * been caught an empty handle is returned.
- */
- Local<Value> Exception() const;
-
- /**
- * Returns the .stack property of an object. If no .stack
- * property is present an empty handle is returned.
- */
- V8_WARN_UNUSED_RESULT static MaybeLocal<Value> StackTrace(
- Local<Context> context, Local<Value> exception);
-
- /**
- * Returns the .stack property of the thrown object. If no .stack property is
- * present or if this try/catch block has not caught an exception, an empty
- * handle is returned.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
- Local<Context> context) const;
-
- /**
- * Returns the message associated with this exception. If no message is
- * associated, an empty handle is returned.
- */
- Local<v8::Message> Message() const;
-
- /**
- * Clears any exceptions that may have been caught by this try/catch block.
- * After this method has been called, HasCaught() will return false. Also
- * cancels the scheduled exception if it was caught and ReThrow() has not
- * been called.
- *
- * It is not necessary to clear a try/catch block before using it again; if
- * another exception is thrown the previously caught exception will just be
- * overwritten. However, it is often a good idea since it makes it easier
- * to determine which operation threw a given exception.
- */
- void Reset();
-
- /**
- * Set verbosity of the external exception handler.
- *
- * By default, exceptions that are caught by an external exception
- * handler are not reported. Call SetVerbose with true on an
- * external exception handler to have exceptions caught by the
- * handler reported as if they were not caught.
- */
- void SetVerbose(bool value);
-
- /**
- * Returns true if verbosity is enabled.
- */
- bool IsVerbose() const;
-
- /**
- * Set whether or not this TryCatch should capture a Message object
- * which holds source information about where the exception
- * occurred. True by default.
- */
- void SetCaptureMessage(bool value);
-
- /**
- * There are cases when the raw address of a C++ TryCatch object cannot be
- * used for comparisons with addresses into the JS stack. The cases are:
- * 1) ARM, ARM64 and MIPS simulators, which have a separate JS stack.
- * 2) Address sanitizer allocates the local C++ object on the heap when
- * UseAfterReturn mode is enabled.
- * This method returns an address that can be used for comparisons with
- * addresses into the JS stack. When neither a simulator nor ASAN's
- * UseAfterReturn is enabled, the address returned will be the address
- * of the C++ try catch handler itself.
- */
- static void* JSStackComparableAddress(TryCatch* handler) {
- if (handler == nullptr) return nullptr;
- return handler->js_stack_comparable_address_;
- }
-
- TryCatch(const TryCatch&) = delete;
- void operator=(const TryCatch&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic allocation.
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- void ResetInternal();
-
- internal::Isolate* isolate_;
- TryCatch* next_;
- void* exception_;
- void* message_obj_;
- void* js_stack_comparable_address_;
- bool is_verbose_ : 1;
- bool can_continue_ : 1;
- bool capture_message_ : 1;
- bool rethrow_ : 1;
- bool has_terminated_ : 1;
-
- friend class internal::Isolate;
-};
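-
-/**
- * An illustrative sketch of wrapping script execution in a TryCatch
- * (hypothetical embedder code; |isolate|, |context| and |script| are assumed
- * to exist, <cstdio> is assumed, and error reporting is simplified):
- * \code
- * v8::TryCatch try_catch(isolate);
- * v8::MaybeLocal<v8::Value> result = script->Run(context);
- * if (result.IsEmpty()) {
- * // HasCaught() is true unless execution was terminated.
- * v8::Local<v8::Message> message = try_catch.Message();
- * if (!message.IsEmpty()) {
- * v8::String::Utf8Value text(isolate, message->Get());
- * fprintf(stderr, "Uncaught exception: %s\n", *text);
- * }
- * }
- * \endcode
- */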
-
-
-// --- Context ---
-
-
-/**
- * A container for extension names.
- */
-class V8_EXPORT ExtensionConfiguration {
- public:
- ExtensionConfiguration() : name_count_(0), names_(nullptr) {}
- ExtensionConfiguration(int name_count, const char* names[])
- : name_count_(name_count), names_(names) { }
-
- const char** begin() const { return &names_[0]; }
- const char** end() const { return &names_[name_count_]; }
-
- private:
- const int name_count_;
- const char** names_;
-};
-
-/**
- * A sandboxed execution context with its own set of built-in objects
- * and functions.
- */
-class V8_EXPORT Context : public Data {
- public:
- /**
- * Returns the global proxy object.
- *
- * The global proxy object is a thin wrapper whose prototype points to the
- * actual context's global object, which carries properties such as Object,
- * etc. This is done for security reasons (for more details see
- * https://wiki.mozilla.org/Gecko:SplitWindow).
- *
- * Please note that changes to the global proxy object's prototype will most
- * probably break the VM; V8 expects only the global object as the prototype
- * of the global proxy object.
- */
- Local<Object> Global();
-
- /**
- * Detaches the global object from its context before
- * the global object can be reused to create a new context.
- */
- void DetachGlobal();
-
- /**
- * Creates a new context and returns a handle to the newly allocated
- * context.
- *
- * \param isolate The isolate in which to create the context.
- *
- * \param extensions An optional extension configuration containing
- * the extensions to be installed in the newly created context.
- *
- * \param global_template An optional object template from which the
- * global object for the newly created context will be created.
- *
- * \param global_object An optional global object to be reused for
- * the newly created context. This global object must have been
- * created by a previous call to Context::New with the same global
- * template. The state of the global object will be completely reset
- * and only object identity will remain.
- */
- static Local<Context> New(
- Isolate* isolate, ExtensionConfiguration* extensions = nullptr,
- MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
- MaybeLocal<Value> global_object = MaybeLocal<Value>(),
- DeserializeInternalFieldsCallback internal_fields_deserializer =
- DeserializeInternalFieldsCallback(),
- MicrotaskQueue* microtask_queue = nullptr);
-
- /**
- * Create a new context from a (non-default) context snapshot. There
- * is no way to provide a global object template since we do not create
- * a new global object from a template, but we can reuse a global object.
- *
- * \param isolate See v8::Context::New.
- *
- * \param context_snapshot_index The index of the context snapshot to
- * deserialize from. Use v8::Context::New for the default snapshot.
- *
- * \param embedder_fields_deserializer Optional callback to deserialize
- * internal fields. It should match the SerializeInternalFieldsCallback used
- * to serialize.
- *
- * \param extensions See v8::Context::New.
- *
- * \param global_object See v8::Context::New.
- */
- static MaybeLocal<Context> FromSnapshot(
- Isolate* isolate, size_t context_snapshot_index,
- DeserializeInternalFieldsCallback embedder_fields_deserializer =
- DeserializeInternalFieldsCallback(),
- ExtensionConfiguration* extensions = nullptr,
- MaybeLocal<Value> global_object = MaybeLocal<Value>(),
- MicrotaskQueue* microtask_queue = nullptr);
-
- /**
- * Returns a global object that isn't backed by an actual context.
- *
- * The global template needs to have access checks with handlers installed.
- * If an existing global object is passed in, the global object is detached
- * from its context.
- *
- * Note that this is different from a detached context where all accesses to
- * the global proxy will fail. Instead, the access check handlers are invoked.
- *
- * It is also not possible to detach an object returned by this method.
- * Instead, the access check handlers need to return nothing to achieve the
- * same effect.
- *
- * It is possible, however, to create a new context from the global object
- * returned by this method.
- */
- static MaybeLocal<Object> NewRemoteContext(
- Isolate* isolate, Local<ObjectTemplate> global_template,
- MaybeLocal<Value> global_object = MaybeLocal<Value>());
-
- /**
- * Sets the security token for the context. To access an object in
- * another context, the security tokens must match.
- */
- void SetSecurityToken(Local<Value> token);
-
- /** Restores the security token to the default value. */
- void UseDefaultSecurityToken();
-
- /** Returns the security token of this context.*/
- Local<Value> GetSecurityToken();
-
- /**
- * Enter this context. After entering a context, all code compiled
- * and run is compiled and run in this context. If another context
- * is already entered, this old context is saved so it can be
- * restored when the new context is exited.
- */
- void Enter();
-
- /**
- * Exit this context. Exiting the current context restores the
- * context that was in place when entering the current context.
- */
- void Exit();
-
- /** Returns the isolate associated with this context. */
- Isolate* GetIsolate();
-
- /** Returns the microtask queue associated with this context. */
- MicrotaskQueue* GetMicrotaskQueue();
-
- /**
- * The field at kDebugIdIndex used to be reserved for the inspector.
- * It now serves no purpose.
- */
- enum EmbedderDataFields { kDebugIdIndex = 0 };
-
- /**
- * Return the number of fields allocated for embedder data.
- */
- uint32_t GetNumberOfEmbedderDataFields();
-
- /**
- * Gets the embedder data with the given index, which must have been set by a
- * previous call to SetEmbedderData with the same index.
- */
- V8_INLINE Local<Value> GetEmbedderData(int index);
-
- /**
- * Gets the binding object used by V8 extras. Extra natives get a reference
- * to this object and can use it to "export" functionality by adding
- * properties. Extra natives can also "import" functionality by accessing
- * properties added by the embedder using the V8 API.
- */
- Local<Object> GetExtrasBindingObject();
-
- /**
- * Sets the embedder data with the given index, growing the data as
- * needed. Note that index 0 currently has a special meaning for Chrome's
- * debugger.
- */
- void SetEmbedderData(int index, Local<Value> value);
-
- /**
- * Gets a 2-byte-aligned native pointer from the embedder data with the given
- * index, which must have been set by a previous call to
- * SetAlignedPointerInEmbedderData with the same index. Note that index 0
- * currently has a special meaning for Chrome's debugger.
- */
- V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
-
- /**
- * Sets a 2-byte-aligned native pointer in the embedder data with the given
- * index, growing the data as needed. Note that index 0 currently has a
- * special meaning for Chrome's debugger.
- */
- void SetAlignedPointerInEmbedderData(int index, void* value);
-
- /**
- * Control whether code generation from strings is allowed. Calling
- * this method with false will disable 'eval' and the 'Function'
- * constructor for code running in this context. If 'eval' or the
- * 'Function' constructor are used an exception will be thrown.
- *
- * If code generation from strings is not allowed, the
- * V8::AllowCodeGenerationFromStrings callback, if set, will be invoked
- * before blocking the call to 'eval' or the 'Function'
- * constructor. If that callback returns true, the call will be
- * allowed; otherwise an exception will be thrown. If no callback is
- * set, an exception will be thrown.
- */
- void AllowCodeGenerationFromStrings(bool allow);
-
- /**
- * Returns true if code generation from strings is allowed for the context.
- * For more details see AllowCodeGenerationFromStrings(bool) documentation.
- */
- bool IsCodeGenerationFromStringsAllowed() const;
-
- /**
- * Sets the error description for the exception that is thrown when
- * code generation from strings is not allowed and 'eval' or the 'Function'
- * constructor are called.
- */
- void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
-
- /**
- * Returns data that was previously attached to the context snapshot via
- * SnapshotCreator, and removes the reference to it.
- * A repeated call with the same index returns an empty MaybeLocal.
- */
- template <class T>
- V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
-
- /**
- * If callback is set, abort any attempt to execute JavaScript in this
- * context, call the specified callback, and throw an exception.
- * To unset abort, pass nullptr as callback.
- */
- using AbortScriptExecutionCallback = void (*)(Isolate* isolate,
- Local<Context> context);
- void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
-
- /**
- * Returns the value that was set or restored by
- * SetContinuationPreservedEmbedderData(), if any.
- */
- Local<Value> GetContinuationPreservedEmbedderData() const;
-
- /**
- * Sets a value that will be stored on continuations and reset while the
- * continuation runs.
- */
- void SetContinuationPreservedEmbedderData(Local<Value> context);
-
- /**
- * Set or clear hooks to be invoked for promise lifecycle operations.
- * To clear a hook, set it to an empty v8::Function. Each function will
- * receive the observed promise as the first argument. If a chaining
- * operation is used on a promise, the init will additionally receive
- * the parent promise as the second argument.
- */
- void SetPromiseHooks(Local<Function> init_hook,
- Local<Function> before_hook,
- Local<Function> after_hook,
- Local<Function> resolve_hook);
-
- /**
- * Stack-allocated class which sets the execution context for all
- * operations executed within a local scope.
- */
- class V8_NODISCARD Scope {
- public:
- explicit V8_INLINE Scope(Local<Context> context) : context_(context) {
- context_->Enter();
- }
- V8_INLINE ~Scope() { context_->Exit(); }
-
- private:
- Local<Context> context_;
- };
-
- /**
- * Stack-allocated class to support the backup incumbent settings object
- * stack.
- * https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
- */
- class V8_EXPORT V8_NODISCARD BackupIncumbentScope final {
- public:
- /**
- * |backup_incumbent_context| is pushed onto the backup incumbent settings
- * object stack.
- */
- explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
- ~BackupIncumbentScope();
-
- /**
- * Returns address that is comparable with JS stack address. Note that JS
- * stack may be allocated separately from the native stack. See also
- * |TryCatch::JSStackComparableAddress| for details.
- */
- uintptr_t JSStackComparableAddress() const {
- return js_stack_comparable_address_;
- }
-
- private:
- friend class internal::Isolate;
-
- Local<Context> backup_incumbent_context_;
- uintptr_t js_stack_comparable_address_ = 0;
- const BackupIncumbentScope* prev_ = nullptr;
- };
-
- V8_INLINE static Context* Cast(Data* data);
-
- private:
- friend class Value;
- friend class Script;
- friend class Object;
- friend class Function;
-
- static void CheckCast(Data* obj);
-
- internal::Address* GetDataFromSnapshotOnce(size_t index);
- Local<Value> SlowGetEmbedderData(int index);
- void* SlowGetAlignedPointerFromEmbedderData(int index);
-};
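-
-/**
- * An illustrative sketch of creating a context and executing code inside it
- * using Context::Scope (hypothetical embedder code; |isolate| is assumed to
- * have been created beforehand):
- * \code
- * v8::Isolate::Scope isolate_scope(isolate);
- * v8::HandleScope handle_scope(isolate);
- * v8::Local<v8::Context> context = v8::Context::New(isolate);
- * v8::Context::Scope context_scope(context);
- * // Code compiled and run here executes in |context|.
- * v8::Local<v8::String> source =
- * v8::String::NewFromUtf8Literal(isolate, "6 * 7");
- * v8::Local<v8::Script> script =
- * v8::Script::Compile(context, source).ToLocalChecked();
- * v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
- * \endcode
- */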
-
-/**
- * Multiple threads in V8 are allowed, but only one thread at a time is allowed
- * to use any given V8 isolate, see the comments in the Isolate class. The
- * definition of 'using a V8 isolate' includes accessing handles or holding onto
- * object pointers obtained from V8 handles while in the particular V8 isolate.
- * It is up to the user of V8 to ensure, perhaps with locking, that this
- * constraint is not violated. In addition to any other synchronization
- * mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
- * used to signal thread switches to V8.
- *
- * v8::Locker is a scoped lock object. While it's active, i.e. between its
- * construction and destruction, the current thread is allowed to use the locked
- * isolate. V8 guarantees that an isolate can be locked by at most one thread at
- * any time. In other words, the scope of a v8::Locker is a critical section.
- *
- * Sample usage:
- * \code
- * ...
- * {
- * v8::Locker locker(isolate);
- * v8::Isolate::Scope isolate_scope(isolate);
- * ...
- * // Code using V8 and isolate goes here.
- * ...
- * } // Destructor called here
- * \endcode
- *
- * If you wish to stop using V8 in a thread A you can do this either by
- * destroying the v8::Locker object as above or by constructing a v8::Unlocker
- * object:
- *
- * \code
- * {
- * isolate->Exit();
- * v8::Unlocker unlocker(isolate);
- * ...
- * // Code not using V8 goes here while V8 can run in another thread.
- * ...
- * } // Destructor called here.
- * isolate->Enter();
- * \endcode
- *
- * The Unlocker object is intended for use in a long-running callback from V8,
- * where you want to release the V8 lock for other threads to use.
- *
- * The v8::Locker is a recursive lock, i.e. you can lock more than once in a
- * given thread. This can be useful if you have code that can be called either
- * from code that holds the lock or from code that does not. The Unlocker is
- * not recursive, so you cannot have several Unlockers on the stack at once, and
- * you cannot use an Unlocker in a thread that is not inside a Locker's scope.
- *
- * An unlocker will unlock several lockers if it has to and reinstate the
- * correct depth of locking on its destruction, e.g.:
- *
- * \code
- * // V8 not locked.
- * {
- * v8::Locker locker(isolate);
- * Isolate::Scope isolate_scope(isolate);
- * // V8 locked.
- * {
- * v8::Locker another_locker(isolate);
- * // V8 still locked (2 levels).
- * {
- * isolate->Exit();
- * v8::Unlocker unlocker(isolate);
- * // V8 not locked.
- * }
- * isolate->Enter();
- * // V8 locked again (2 levels).
- * }
- * // V8 still locked (1 level).
- * }
- * // V8 Now no longer locked.
- * \endcode
- */
-class V8_EXPORT Unlocker {
- public:
- /**
- * Initialize Unlocker for a given Isolate.
- */
- V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
-
- ~Unlocker();
- private:
- void Initialize(Isolate* isolate);
-
- internal::Isolate* isolate_;
-};
-
-
-class V8_EXPORT Locker {
- public:
- /**
- * Initialize Locker for a given Isolate.
- */
- V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
-
- ~Locker();
-
- /**
- * Returns whether or not the locker for a given isolate is locked by the
- * current thread.
- */
- static bool IsLocked(Isolate* isolate);
-
- /**
- * Returns whether v8::Locker is being used by this V8 instance.
- */
- static bool IsActive();
-
- // Disallow copying and assigning.
- Locker(const Locker&) = delete;
- void operator=(const Locker&) = delete;
-
- private:
- void Initialize(Isolate* isolate);
-
- bool has_lock_;
- bool top_level_;
- internal::Isolate* isolate_;
-};
-
-/**
- * Various helpers for skipping over V8 frames in a given stack.
- *
- * The unwinder API is only supported on the x64, ARM64 and ARM32 architectures.
- */
-class V8_EXPORT Unwinder {
- public:
- /**
- * Attempt to unwind the stack to the most recent C++ frame. This function is
- * signal-safe and does not access any V8 state and thus doesn't require an
- * Isolate.
- *
- * The unwinder needs to know the location of the JS Entry Stub (a piece of
- * code that is run when C++ code calls into generated JS code). This is used
- * for edge cases where the current frame is being constructed or torn down
- * when the stack sample occurs.
- *
- * The unwinder also needs the virtual memory range of all possible V8 code
- * objects. There are two ranges required - the heap code range and the range
- * for code embedded in the binary.
- *
- * Available on x64, ARM64 and ARM32.
- *
- * \param code_pages A list of all of the ranges in which V8 has allocated
- * executable code. The caller should obtain this list by calling
- * Isolate::CopyCodePages() during the same interrupt/thread suspension that
- * captures the stack.
- * \param register_state The current registers. This is an in-out param that
- * will be overwritten with the register values after unwinding, on success.
- * \param stack_base The resulting stack pointer and frame pointer values are
- * bounds-checked against the stack_base and the original stack pointer value
- * to ensure that they are valid locations in the given stack. If these values
- * or any intermediate frame pointer values used during unwinding are ever out
- * of these bounds, unwinding will fail.
- *
- * \return True on success.
- */
- static bool TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
- size_t code_pages_length,
- const MemoryRange* code_pages,
- RegisterState* register_state,
- const void* stack_base);
-
- /**
- * Whether the PC is within the V8 code range represented by code_pages.
- *
- * If this returns false, then calling TryUnwindV8Frames() with the same PC
- * and code_pages will always fail. If it returns true, then unwinding may
- * (but not necessarily) be successful.
- *
- * Available on x64, ARM64 and ARM32.
- */
- static bool PCIsInV8(size_t code_pages_length, const MemoryRange* code_pages,
- void* pc);
-};
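-
-/**
- * An illustrative sketch of using the unwinder from a sampling profiler
- * (hypothetical embedder code; |register_state| and |stack_base| are assumed
- * to have been captured while the sampled thread was suspended, and
- * <algorithm> is assumed for std::min):
- * \code
- * v8::JSEntryStubs entry_stubs = isolate->GetJSEntryStubs();
- * v8::MemoryRange code_pages[v8::Isolate::kMinCodePagesBufferSize];
- * size_t pages = isolate->CopyCodePages(
- * v8::Isolate::kMinCodePagesBufferSize, code_pages);
- * // CopyCodePages may report more pages than fit in the buffer.
- * pages = std::min(pages, v8::Isolate::kMinCodePagesBufferSize);
- * if (v8::Unwinder::PCIsInV8(pages, code_pages, register_state.pc)) {
- * v8::Unwinder::TryUnwindV8Frames(entry_stubs, pages, code_pages,
- * &register_state, stack_base);
- * }
- * \endcode
- */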
-
-// --- Implementation ---
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, Local<T> that) {
- return New(isolate, that.val_);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
- return New(isolate, that.val_);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const BasicTracedReference<T>& that) {
- return New(isolate, *that);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, T* that) {
- if (that == nullptr) return Local<T>();
- T* that_ptr = that;
- internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::Isolate*>(isolate), *p)));
-}
-
-
-template<class T>
-template<class S>
-void Eternal<T>::Set(Isolate* isolate, Local<S> handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- val_ = reinterpret_cast<T*>(
- V8::Eternalize(isolate, reinterpret_cast<Value*>(*handle)));
-}
-
-template <class T>
-Local<T> Eternal<T>::Get(Isolate* isolate) const {
- // The eternal handle will never go away, so as with the roots, we don't even
- // need to open a handle.
- return Local<T>(val_);
-}
-
-
-template <class T>
-Local<T> MaybeLocal<T>::ToLocalChecked() {
- if (V8_UNLIKELY(val_ == nullptr)) V8::ToLocalEmpty();
- return Local<T>(val_);
-}
-
-
-template <class T>
-void* WeakCallbackInfo<T>::GetInternalField(int index) const {
-#ifdef V8_ENABLE_CHECKS
- if (index < 0 || index >= kEmbedderFieldsInWeakCallback) {
- V8::InternalFieldOutOfBounds(index);
- }
-#endif
- return embedder_fields_[index];
-}
-
-
-template <class T>
-T* PersistentBase<T>::New(Isolate* isolate, T* that) {
- if (that == nullptr) return nullptr;
- internal::Address* p = reinterpret_cast<internal::Address*>(that);
- return reinterpret_cast<T*>(
- V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
- p));
-}
-
-
-template <class T, class M>
-template <class S, class M2>
-void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- this->Reset();
- if (that.IsEmpty()) return;
- internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
- this->val_ = reinterpret_cast<T*>(V8::CopyGlobalReference(p));
- M::Copy(that, this);
-}
-
-template <class T>
-bool PersistentBase<T>::IsWeak() const {
- using I = internal::Internals;
- if (this->IsEmpty()) return false;
- return I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_)) ==
- I::kNodeStateIsWeakValue;
-}
-
-
-template <class T>
-void PersistentBase<T>::Reset() {
- if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Address*>(this->val_));
- val_ = nullptr;
-}
-
-
-template <class T>
-template <class S>
-void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_);
-}
-
-
-template <class T>
-template <class S>
-void PersistentBase<T>::Reset(Isolate* isolate,
- const PersistentBase<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_);
-}
-
-
-template <class T>
-template <typename P>
-V8_INLINE void PersistentBase<T>::SetWeak(
- P* parameter, typename WeakCallbackInfo<P>::Callback callback,
- WeakCallbackType type) {
- using Callback = WeakCallbackInfo<void>::Callback;
-#if (__GNUC__ >= 8) && !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-function-type"
-#endif
- V8::MakeWeak(reinterpret_cast<internal::Address*>(this->val_), parameter,
- reinterpret_cast<Callback>(callback), type);
-#if (__GNUC__ >= 8) && !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
-}
-
-template <class T>
-void PersistentBase<T>::SetWeak() {
- V8::MakeWeak(reinterpret_cast<internal::Address**>(&this->val_));
-}
-
-template <class T>
-template <typename P>
-P* PersistentBase<T>::ClearWeak() {
- return reinterpret_cast<P*>(
- V8::ClearWeak(reinterpret_cast<internal::Address*>(this->val_)));
-}
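-
-/**
- * An illustrative sketch of making a Global weak with a first-pass callback
- * that clears the handle and frees embedder state (hypothetical embedder
- * code; WrapperState and |wrapper| are assumed embedder-side names):
- * \code
- * struct WrapperState {
- * v8::Global<v8::Object> handle;
- * };
- *
- * void OnWrapperGone(const v8::WeakCallbackInfo<WrapperState>& info) {
- * WrapperState* state = info.GetParameter();
- * state->handle.Reset(); // Must be cleared in the first-pass callback.
- * delete state;
- * }
- *
- * // |wrapper| is a Local<Object>; |state| keeps the Global alive weakly.
- * WrapperState* state = new WrapperState;
- * state->handle.Reset(isolate, wrapper);
- * state->handle.SetWeak(state, OnWrapperGone,
- * v8::WeakCallbackType::kParameter);
- * \endcode
- */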
-
-template <class T>
-void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
- V8::AnnotateStrongRetainer(reinterpret_cast<internal::Address*>(this->val_),
- label);
-}
-
-template <class T>
-void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
- using I = internal::Internals;
- if (this->IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- *reinterpret_cast<uint16_t*>(addr) = class_id;
-}
-
-
-template <class T>
-uint16_t PersistentBase<T>::WrapperClassId() const {
- using I = internal::Internals;
- if (this->IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- return *reinterpret_cast<uint16_t*>(addr);
-}
-
-template <class T>
-Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
- if (other.val_ != nullptr) {
- V8::MoveGlobalReference(reinterpret_cast<internal::Address**>(&other.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- other.val_ = nullptr;
- }
-}
-
-template <class T>
-template <class S>
-Global<T>& Global<T>::operator=(Global<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- this->val_ = rhs.val_;
- V8::MoveGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- rhs.val_ = nullptr;
- }
- }
- return *this;
-}
-
-template <class T>
-internal::Address* BasicTracedReference<T>::New(
- Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) {
- if (that == nullptr) return nullptr;
- internal::Address* p = reinterpret_cast<internal::Address*>(that);
- return V8::GlobalizeTracedReference(
- reinterpret_cast<internal::Isolate*>(isolate), p,
- reinterpret_cast<internal::Address*>(slot),
- destruction_mode == kWithDestructor);
-}
-
-void TracedReferenceBase::Reset() {
- if (IsEmpty()) return;
- V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
- SetSlotThreadSafe(nullptr);
-}
-
-v8::Local<v8::Value> TracedReferenceBase::Get(v8::Isolate* isolate) const {
- if (IsEmpty()) return Local<Value>();
- return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
-}
-
-V8_INLINE bool operator==(const TracedReferenceBase& lhs,
- const TracedReferenceBase& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename U>
-V8_INLINE bool operator==(const TracedReferenceBase& lhs,
- const v8::Local<U>& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename U>
-V8_INLINE bool operator==(const v8::Local<U>& lhs,
- const TracedReferenceBase& rhs) {
- return rhs == lhs;
-}
-
-V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
- const TracedReferenceBase& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename U>
-V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
- const v8::Local<U>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename U>
-V8_INLINE bool operator!=(const v8::Local<U>& lhs,
- const TracedReferenceBase& rhs) {
- return !(rhs == lhs);
-}
-
-template <class T>
-template <class S>
-void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = this->New(isolate, other.val_, &this->val_,
- BasicTracedReference<T>::kWithDestructor);
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = std::move(rhs.template As<T>());
- return *this;
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = rhs.template As<T>();
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) {
- if (this != &rhs) {
- V8::MoveTracedGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- V8::CopyTracedGlobalReference(
- reinterpret_cast<const internal::Address* const*>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- }
- return *this;
-}
-
-template <class T>
-template <class S>
-void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- this->Reset();
- if (other.IsEmpty()) return;
- this->SetSlotThreadSafe(
- this->New(isolate, other.val_, &this->val_,
- BasicTracedReference<T>::kWithoutDestructor));
-}
-
-template <class T>
-template <class S>
-TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = std::move(rhs.template As<T>());
- return *this;
-}
-
-template <class T>
-template <class S>
-TracedReference<T>& TracedReference<T>::operator=(
- const TracedReference<S>& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = rhs.template As<T>();
- return *this;
-}
-
-template <class T>
-TracedReference<T>& TracedReference<T>::operator=(TracedReference&& rhs) {
- if (this != &rhs) {
- V8::MoveTracedGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- return *this;
-}
-
-template <class T>
-TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- V8::CopyTracedGlobalReference(
- reinterpret_cast<const internal::Address* const*>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- }
- return *this;
-}
-
-void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
- using I = internal::Internals;
- if (IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- *reinterpret_cast<uint16_t*>(addr) = class_id;
-}
-
-uint16_t TracedReferenceBase::WrapperClassId() const {
- using I = internal::Internals;
- if (IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- return *reinterpret_cast<uint16_t*>(addr);
-}
-
-template <class T>
-void TracedGlobal<T>::SetFinalizationCallback(
- void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
- V8::SetFinalizationCallbackTraced(
- reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
-}
-
-template <typename T>
-ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const Global<S>& handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
- }
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const BasicTracedReference<S>& handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(handle.val_);
- }
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const Local<S> handle) {
- static_assert(std::is_void<T>::value || std::is_base_of<T, S>::value,
- "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
- }
-}
-
-template<typename T>
-void ReturnValue<T>::Set(double i) {
- static_assert(std::is_base_of<T, Number>::value, "type check");
- Set(Number::New(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(int32_t i) {
- static_assert(std::is_base_of<T, Integer>::value, "type check");
- using I = internal::Internals;
- if (V8_LIKELY(I::IsValidSmi(i))) {
- *value_ = I::IntToSmi(i);
- return;
- }
- Set(Integer::New(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(uint32_t i) {
- static_assert(std::is_base_of<T, Integer>::value, "type check");
-  // Can't simply compare against INT32_MAX here for whatever reason; instead
-  // test the sign bit to check whether the value also fits into int32_t.
-  bool fits_into_int32_t = (i & (1U << 31)) == 0;
- if (V8_LIKELY(fits_into_int32_t)) {
- Set(static_cast<int32_t>(i));
- return;
- }
- Set(Integer::NewFromUnsigned(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(bool value) {
- static_assert(std::is_base_of<T, Boolean>::value, "type check");
- using I = internal::Internals;
- int root_index;
- if (value) {
- root_index = I::kTrueValueRootIndex;
- } else {
- root_index = I::kFalseValueRootIndex;
- }
- *value_ = *I::GetRoot(GetIsolate(), root_index);
-}
-
-template<typename T>
-void ReturnValue<T>::SetNull() {
- static_assert(std::is_base_of<T, Primitive>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
-}
-
-template<typename T>
-void ReturnValue<T>::SetUndefined() {
- static_assert(std::is_base_of<T, Primitive>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
-}
-
-template<typename T>
-void ReturnValue<T>::SetEmptyString() {
- static_assert(std::is_base_of<T, String>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
-}
-
-template <typename T>
-Isolate* ReturnValue<T>::GetIsolate() const {
- // Isolate is always the pointer below the default value on the stack.
- return *reinterpret_cast<Isolate**>(&value_[-2]);
-}
-
-template <typename T>
-Local<Value> ReturnValue<T>::Get() const {
- using I = internal::Internals;
- if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
- return Local<Value>(*Undefined(GetIsolate()));
- return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(S* whatever) {
- static_assert(sizeof(S) < 0, "incompilable to prevent inadvertent misuse");
-}
-
-template <typename T>
-internal::Address ReturnValue<T>::GetDefaultValue() {
- // Default value is always the pointer below value_ on the stack.
- return value_[-1];
-}
-
-template <typename T>
-FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
- internal::Address* values,
- int length)
- : implicit_args_(implicit_args), values_(values), length_(length) {}
-
-template<typename T>
-Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
- // values_ points to the first argument (not the receiver).
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
- return Local<Value>(reinterpret_cast<Value*>(values_ + i));
-}
-
-
-template<typename T>
-Local<Object> FunctionCallbackInfo<T>::This() const {
- // values_ points to the first argument (not the receiver).
- return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
-}
-
-
-template<typename T>
-Local<Object> FunctionCallbackInfo<T>::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(
- &implicit_args_[kHolderIndex]));
-}
-
-template <typename T>
-Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
- return Local<Value>(
- reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
-}
-
-template <typename T>
-Local<Value> FunctionCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
-}
-
-
-template<typename T>
-Isolate* FunctionCallbackInfo<T>::GetIsolate() const {
- return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
-}
-
-
-template<typename T>
-ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
- return ReturnValue<T>(&implicit_args_[kReturnValueIndex]);
-}
-
-
-template<typename T>
-bool FunctionCallbackInfo<T>::IsConstructCall() const {
- return !NewTarget()->IsUndefined();
-}
-
-
-template<typename T>
-int FunctionCallbackInfo<T>::Length() const {
- return length_;
-}
-
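// Editorial note (not part of this header): a minimal, hedged sketch of an
// embedder callback exercising the accessors defined above -- Length(),
// operator[], GetIsolate() and GetReturnValue(). Function and variable names
// are illustrative; assumes the public v8 headers are included.
//
//   void Sum(const v8::FunctionCallbackInfo<v8::Value>& info) {
//     v8::Isolate* isolate = info.GetIsolate();
//     v8::Local<v8::Context> context = isolate->GetCurrentContext();
//     double sum = 0;
//     for (int i = 0; i < info.Length(); ++i) {
//       // Non-numbers contribute 0; NumberValue() returns a Maybe<double>.
//       sum += info[i]->NumberValue(context).FromMaybe(0);
//     }
//     info.GetReturnValue().Set(sum);  // Uses ReturnValue<T>::Set(double).
//   }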
-ScriptOrigin::ScriptOrigin(
- Local<Value> resource_name, Local<Integer> line_offset,
- Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
- Local<Integer> script_id, Local<Value> source_map_url,
- Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
- Local<PrimitiveArray> host_defined_options)
- : ScriptOrigin(
- Isolate::GetCurrent(), resource_name,
- line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
- column_offset.IsEmpty() ? 0
- : static_cast<int>(column_offset->Value()),
- !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
- static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
- source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
- !is_wasm.IsEmpty() && is_wasm->IsTrue(),
- !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
-
-ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
- int column_offset, bool is_shared_cross_origin,
- int script_id, Local<Value> source_map_url,
- bool is_opaque, bool is_wasm, bool is_module,
- Local<PrimitiveArray> host_defined_options)
- : isolate_(Isolate::GetCurrent()),
- resource_name_(resource_name),
- resource_line_offset_(line_offset),
- resource_column_offset_(column_offset),
- options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
- script_id_(script_id),
- source_map_url_(source_map_url),
- host_defined_options_(host_defined_options) {}
-
-ScriptOrigin::ScriptOrigin(Isolate* isolate, Local<Value> resource_name,
- int line_offset, int column_offset,
- bool is_shared_cross_origin, int script_id,
- Local<Value> source_map_url, bool is_opaque,
- bool is_wasm, bool is_module,
- Local<PrimitiveArray> host_defined_options)
- : isolate_(isolate),
- resource_name_(resource_name),
- resource_line_offset_(line_offset),
- resource_column_offset_(column_offset),
- options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
- script_id_(script_id),
- source_map_url_(source_map_url),
- host_defined_options_(host_defined_options) {}
-
-Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
-
-Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
- return host_defined_options_;
-}
-
-Local<Integer> ScriptOrigin::ResourceLineOffset() const {
- return v8::Integer::New(isolate_, resource_line_offset_);
-}
-
-Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
- return v8::Integer::New(isolate_, resource_column_offset_);
-}
-
-Local<Integer> ScriptOrigin::ScriptID() const {
- return v8::Integer::New(isolate_, script_id_);
-}
-
-int ScriptOrigin::LineOffset() const { return resource_line_offset_; }
-
-int ScriptOrigin::ColumnOffset() const { return resource_column_offset_; }
-
-int ScriptOrigin::ScriptId() const { return script_id_; }
-
-Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
-
-ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
- CachedData* data,
- ConsumeCodeCacheTask* consume_cache_task)
- : source_string(string),
- resource_name(origin.ResourceName()),
- resource_line_offset(origin.LineOffset()),
- resource_column_offset(origin.ColumnOffset()),
- resource_options(origin.Options()),
- source_map_url(origin.SourceMapUrl()),
- host_defined_options(origin.HostDefinedOptions()),
- cached_data(data),
- consume_cache_task(consume_cache_task) {}
-
-ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
- ConsumeCodeCacheTask* consume_cache_task)
- : source_string(string),
- cached_data(data),
- consume_cache_task(consume_cache_task) {}
-
-const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
- const {
- return cached_data.get();
-}
-
-const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
- return resource_options;
-}
-
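// Editorial note (not from this header): a hedged sketch of how the
// ScriptOrigin and ScriptCompiler::Source definitions above are typically
// combined when compiling a script; the function and parameter names are
// illustrative only.
//
//   v8::MaybeLocal<v8::Script> CompileNamed(v8::Local<v8::Context> context,
//                                           v8::Isolate* isolate,
//                                           v8::Local<v8::String> code,
//                                           v8::Local<v8::String> name) {
//     v8::ScriptOrigin origin(isolate, name, /*line_offset=*/0,
//                             /*column_offset=*/0);
//     v8::ScriptCompiler::Source source(code, origin);
//     return v8::ScriptCompiler::Compile(context, &source);
//   }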
-Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
- return value ? True(isolate) : False(isolate);
-}
-
-void Template::Set(Isolate* isolate, const char* name, Local<Data> value,
- PropertyAttribute attributes) {
- Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
- .ToLocalChecked(),
- value, attributes);
-}
-
-FunctionTemplate* FunctionTemplate::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<FunctionTemplate*>(data);
-}
-
-ObjectTemplate* ObjectTemplate::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<ObjectTemplate*>(data);
-}
-
-Signature* Signature::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Signature*>(data);
-}
-
-AccessorSignature* AccessorSignature::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<AccessorSignature*>(data);
-}
-
-Local<Value> Object::GetInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
- int instance_type = I::GetInstanceType(obj);
- if (v8::internal::CanHaveInternalField(instance_type)) {
- int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
- A value = I::ReadRawField<A>(obj, offset);
-#ifdef V8_COMPRESS_POINTERS
- // We read the full pointer value and then decompress it in order to avoid
-  // dealing with potential endianness issues.
- value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
-#endif
- internal::Isolate* isolate =
- internal::IsolateFromNeverReadOnlySpaceObject(obj);
- A* result = HandleScope::CreateHandle(isolate, value);
- return Local<Value>(reinterpret_cast<Value*>(result));
- }
-#endif
- return SlowGetInternalField(index);
-}
-
-
-void* Object::GetAlignedPointerFromInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
- auto instance_type = I::GetInstanceType(obj);
- if (v8::internal::CanHaveInternalField(instance_type)) {
- int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
- offset += I::kEmbedderDataSlotRawPayloadOffset;
-#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value = I::ReadExternalPointerField(
- isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
- return reinterpret_cast<void*>(value);
- }
-#endif
- return SlowGetAlignedPointerFromInternalField(index);
-}
-
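// Editorial note (not from this header): the fast paths above serve the usual
// embedder pattern of stashing a native pointer in an internal field; a
// hedged sketch, with "native_ptr" purely illustrative.
//
//   void AttachNativePointer(v8::Isolate* isolate,
//                            v8::Local<v8::Context> context,
//                            void* native_ptr) {
//     v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
//     templ->SetInternalFieldCount(1);
//     v8::Local<v8::Object> obj =
//         templ->NewInstance(context).ToLocalChecked();
//     obj->SetAlignedPointerInInternalField(0, native_ptr);
//     void* raw = obj->GetAlignedPointerFromInternalField(0);
//     (void)raw;
//   }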
-String* String::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<String*>(data);
-}
-
-Local<String> String::Empty(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
- return Local<String>(reinterpret_cast<String*>(slot));
-}
-
-
-String::ExternalStringResource* String::GetExternalStringResource() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
-
- ExternalStringResource* result;
- if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
- internal::kExternalStringResourceTag);
- result = reinterpret_cast<String::ExternalStringResource*>(value);
- } else {
- result = GetExternalStringResourceSlow();
- }
-#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResource(result);
-#endif
- return result;
-}
-
-
-String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
- String::Encoding* encoding_out) const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
- *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
- ExternalStringResourceBase* resource;
- if (type == I::kExternalOneByteRepresentationTag ||
- type == I::kExternalTwoByteRepresentationTag) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
- internal::kExternalStringResourceTag);
- resource = reinterpret_cast<ExternalStringResourceBase*>(value);
- } else {
- resource = GetExternalStringResourceBaseSlow(encoding_out);
- }
-#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResourceBase(resource, *encoding_out);
-#endif
- return resource;
-}
-
-
-bool Value::IsUndefined() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsUndefined();
-#else
- return QuickIsUndefined();
-#endif
-}
-
-bool Value::QuickIsUndefined() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
-}
-
-
-bool Value::IsNull() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsNull();
-#else
- return QuickIsNull();
-#endif
-}
-
-bool Value::QuickIsNull() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- return (I::GetOddballKind(obj) == I::kNullOddballKind);
-}
-
-bool Value::IsNullOrUndefined() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsNull() || FullIsUndefined();
-#else
- return QuickIsNullOrUndefined();
-#endif
-}
-
-bool Value::QuickIsNullOrUndefined() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- int kind = I::GetOddballKind(obj);
- return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
-}
-
-bool Value::IsString() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsString();
-#else
- return QuickIsString();
-#endif
-}
-
-bool Value::QuickIsString() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- return (I::GetInstanceType(obj) < I::kFirstNonstringType);
-}
-
-
-template <class T> Value* Value::Cast(T* value) {
- return static_cast<Value*>(value);
-}
-
-template <>
-V8_INLINE Value* Value::Cast(Data* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Value*>(value);
-}
-
-Boolean* Boolean::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Boolean*>(data);
-}
-
-Name* Name::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Name*>(data);
-}
-
-Symbol* Symbol::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Symbol*>(data);
-}
-
-Private* Private::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Private*>(data);
-}
-
-ModuleRequest* ModuleRequest::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<ModuleRequest*>(data);
-}
-
-Module* Module::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Module*>(data);
-}
-
-Number* Number::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Number*>(data);
-}
-
-Integer* Integer::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Integer*>(data);
-}
-
-Int32* Int32::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Int32*>(data);
-}
-
-Uint32* Uint32::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Uint32*>(data);
-}
-
-BigInt* BigInt::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<BigInt*>(data);
-}
-
-Context* Context::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Context*>(data);
-}
-
-Date* Date::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Date*>(value);
-}
-
-
-StringObject* StringObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<StringObject*>(value);
-}
-
-
-SymbolObject* SymbolObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<SymbolObject*>(value);
-}
-
-
-NumberObject* NumberObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<NumberObject*>(value);
-}
-
-BigIntObject* BigIntObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigIntObject*>(value);
-}
-
-BooleanObject* BooleanObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BooleanObject*>(value);
-}
-
-
-RegExp* RegExp::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<RegExp*>(value);
-}
-
-
-Object* Object::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Object*>(value);
-}
-
-
-Array* Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Array*>(value);
-}
-
-
-Map* Map::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Map*>(value);
-}
-
-
-Set* Set::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Set*>(value);
-}
-
-
-Promise* Promise::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Promise*>(value);
-}
-
-
-Proxy* Proxy::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Proxy*>(value);
-}
-
-WasmMemoryObject* WasmMemoryObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<WasmMemoryObject*>(value);
-}
-
-WasmModuleObject* WasmModuleObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<WasmModuleObject*>(value);
-}
-
-Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Promise::Resolver*>(value);
-}
-
-
-ArrayBuffer* ArrayBuffer::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<ArrayBuffer*>(value);
-}
-
-
-ArrayBufferView* ArrayBufferView::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<ArrayBufferView*>(value);
-}
-
-
-TypedArray* TypedArray::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<TypedArray*>(value);
-}
-
-
-Uint8Array* Uint8Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint8Array*>(value);
-}
-
-
-Int8Array* Int8Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int8Array*>(value);
-}
-
-
-Uint16Array* Uint16Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint16Array*>(value);
-}
-
-
-Int16Array* Int16Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int16Array*>(value);
-}
-
-
-Uint32Array* Uint32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint32Array*>(value);
-}
-
-
-Int32Array* Int32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int32Array*>(value);
-}
-
-
-Float32Array* Float32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Float32Array*>(value);
-}
-
-
-Float64Array* Float64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Float64Array*>(value);
-}
-
-BigInt64Array* BigInt64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigInt64Array*>(value);
-}
-
-BigUint64Array* BigUint64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigUint64Array*>(value);
-}
-
-Uint8ClampedArray* Uint8ClampedArray::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint8ClampedArray*>(value);
-}
-
-
-DataView* DataView::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<DataView*>(value);
-}
-
-
-SharedArrayBuffer* SharedArrayBuffer::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<SharedArrayBuffer*>(value);
-}
-
-
-Function* Function::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Function*>(value);
-}
-
-
-External* External::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<External*>(value);
-}
-
-
-template<typename T>
-Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
- return *reinterpret_cast<Isolate**>(&args_[kIsolateIndex]);
-}
-
-
-template<typename T>
-Local<Value> PropertyCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
-}
-
-
-template<typename T>
-Local<Object> PropertyCallbackInfo<T>::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
-}
-
-
-template<typename T>
-Local<Object> PropertyCallbackInfo<T>::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
-}
-
-
-template<typename T>
-ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
- return ReturnValue<T>(&args_[kReturnValueIndex]);
-}
-
-template <typename T>
-bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
- using I = internal::Internals;
- if (args_[kShouldThrowOnErrorIndex] !=
- I::IntToSmi(I::kInferShouldThrowMode)) {
- return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(I::kDontThrow);
- }
- return v8::internal::ShouldThrowOnError(
- reinterpret_cast<v8::internal::Isolate*>(GetIsolate()));
-}
-
-Local<Primitive> Undefined(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
-}
-
-
-Local<Primitive> Null(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
-}
-
-
-Local<Boolean> True(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
-}
-
-
-Local<Boolean> False(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
-}
-
-
-void Isolate::SetData(uint32_t slot, void* data) {
- using I = internal::Internals;
- I::SetEmbedderData(this, slot, data);
-}
-
-
-void* Isolate::GetData(uint32_t slot) {
- using I = internal::Internals;
- return I::GetEmbedderData(this, slot);
-}
-
-
-uint32_t Isolate::GetNumberOfDataSlots() {
- using I = internal::Internals;
- return I::kNumIsolateDataSlots;
-}
-
-template <class T>
-MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
- T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
- if (data) internal::PerformCastCheck(data);
- return Local<T>(data);
-}
-
-Local<Value> Context::GetEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A ctx = *reinterpret_cast<const A*>(this);
- A embedder_data =
- I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
- int value_offset =
- I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
- A value = I::ReadRawField<A>(embedder_data, value_offset);
-#ifdef V8_COMPRESS_POINTERS
- // We read the full pointer value and then decompress it in order to avoid
-  // dealing with potential endianness issues.
- value =
- I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
-#endif
- internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
- *reinterpret_cast<A*>(this));
- A* result = HandleScope::CreateHandle(isolate, value);
- return Local<Value>(reinterpret_cast<Value*>(result));
-#else
- return SlowGetEmbedderData(index);
-#endif
-}
-
-
-void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A ctx = *reinterpret_cast<const A*>(this);
- A embedder_data =
- I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
- int value_offset =
- I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
- value_offset += I::kEmbedderDataSlotRawPayloadOffset;
-#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
- return reinterpret_cast<void*>(
- I::ReadExternalPointerField(isolate, embedder_data, value_offset,
- internal::kEmbedderDataSlotPayloadTag));
-#else
- return SlowGetAlignedPointerFromEmbedderData(index);
-#endif
-}
-
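// Editorial note (not from this header): a hedged sketch of the embedder-data
// round trip that the fast paths above accelerate; the slot indices and names
// are illustrative.
//
//   void StashState(v8::Isolate* isolate, v8::Local<v8::Context> context,
//                   void* state) {
//     context->SetAlignedPointerInEmbedderData(1, state);
//     void* raw = context->GetAlignedPointerFromEmbedderData(1);
//     context->SetEmbedderData(2, v8::Number::New(isolate, 42));
//     v8::Local<v8::Value> tagged = context->GetEmbedderData(2);
//     (void)raw;
//     (void)tagged;
//   }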
-template <class T>
-MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
- T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
- if (data) internal::PerformCastCheck(data);
- return Local<T>(data);
-}
-
-template <class T>
-size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(context, *p);
-}
-
-template <class T>
-size_t SnapshotCreator::AddData(Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(*p);
-}
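// Editorial note (not from this header): AddData pairs with
// Context::GetDataFromSnapshotOnce / Isolate::GetDataFromSnapshotOnce after
// the snapshot is deserialized; a hedged sketch, where the index bookkeeping
// on the embedder side is illustrative.
//
//   size_t Stash(v8::SnapshotCreator* creator, v8::Local<v8::Context> context,
//                v8::Local<v8::String> payload) {
//     return creator->AddData(context, payload);  // remember this index
//   }
//   // ...later, in the deserialized context:
//   //   v8::MaybeLocal<v8::String> restored =
//   //       context->GetDataFromSnapshotOnce<v8::String>(index);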
/**
* \example shell.cc
@@ -12332,7 +79,6 @@ size_t SnapshotCreator::AddData(Local<T> object) {
* command-line and executes them.
*/
-
/**
* \example process.cc
*/
diff --git a/chromium/v8/infra/mb/mb_config.pyl b/chromium/v8/infra/mb/mb_config.pyl
index 236dc1e8474..e3afd9787b9 100644
--- a/chromium/v8/infra/mb/mb_config.pyl
+++ b/chromium/v8/infra/mb/mb_config.pyl
@@ -77,6 +77,9 @@
'V8 Linux64 - no wasm - builder': 'release_x64_webassembly_disabled',
# Windows.
'V8 Win32 - builder': 'release_x86_minimal_symbols',
+ 'V8 Win32 - builder (goma cache silo)': 'release_x64',
+ 'V8 Win32 - builder (reclient)': 'release_x86_minimal_symbols_reclient',
+ 'V8 Win32 - builder (reclient compare)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
# TODO(machenbach): Remove after switching to x64 on infra side.
'V8 Win64 ASAN': 'release_x64_asan_no_lsan',
@@ -85,11 +88,10 @@
'V8 Win64 - debug': 'debug_x64_minimal_symbols',
'V8 Win64 - msvc': 'release_x64_msvc',
# Mac.
- 'V8 Mac64': 'release_x64',
- 'V8 Mac64 - debug': 'debug_x64',
+ 'V8 Mac64 - builder': 'release_x64',
+ 'V8 Mac64 - debug builder': 'debug_x64',
'V8 Official Mac ARM64': 'release_arm64',
'V8 Official Mac ARM64 Debug': 'debug_arm64',
- 'V8 Mac64 GC Stress': 'debug_x64',
'V8 Mac64 ASAN': 'release_x64_asan_no_lsan',
'V8 Mac - arm64 - release builder': 'release_arm64',
'V8 Mac - arm64 - debug builder': 'debug_arm64',
@@ -105,6 +107,8 @@
'V8 Linux gcc': 'release_x86_gcc',
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
+ 'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim',
+ 'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats',
'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation',
@@ -194,6 +198,8 @@
'V8 Linux - s390x - sim': 'release_simulate_s390x',
# RISC-V
'V8 Linux - riscv64 - sim - builder': 'release_simulate_riscv64',
+ # Loongson
+ 'V8 Linux - loong64 - sim - builder': 'release_simulate_loong64',
},
'tryserver.v8': {
'v8_android_arm_compile_rel': 'release_android_arm',
@@ -216,6 +222,7 @@
'v8_linux_vtunejit': 'debug_x86_vtunejit',
'v8_linux64_arm64_pointer_compression_rel_ng':
'release_simulate_arm64_pointer_compression',
+ 'v8_linux64_cppgc_non_default_dbg_ng': 'debug_x64_non_default_cppgc',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot',
'v8_linux64_disable_runtime_call_stats_rel': 'release_x64_disable_runtime_call_stats',
@@ -225,6 +232,7 @@
'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox',
+ 'v8_linux_arm64_sim_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox_arm64_sim',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_nodcheck_rel_ng': 'release_x64',
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
@@ -237,6 +245,7 @@
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
'v8_linux64_cfi_rel_ng': 'release_x64_cfi',
'v8_linux64_fuzzilli_ng': 'release_x64_fuzzilli',
+ 'v8_linux64_loong64_rel_ng': 'release_simulate_loong64',
'v8_linux64_msan_rel_ng': 'release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_riscv64_rel_ng': 'release_simulate_riscv64',
'v8_linux64_tsan_rel_ng': 'release_x64_tsan_minimal_symbols',
@@ -282,6 +291,9 @@
'v8_linux_arm64_gc_stress_dbg_ng': 'debug_simulate_arm64',
'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel',
'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el',
+ 'v8_numfuzz_ng': 'release_x64',
+ 'v8_numfuzz_dbg_ng': 'debug_x64',
+ 'v8_numfuzz_tsan_ng': 'release_x64_tsan',
},
},
@@ -413,6 +425,8 @@
'release_bot', 'simulate_arm64', 'msan_no_origins'],
'release_simulate_arm64_trybot': [
'release_trybot', 'simulate_arm64'],
+ 'release_simulate_loong64': [
+ 'release_bot', 'simulate_loong64'],
'release_simulate_mipsel': [
'release_bot', 'simulate_mipsel'],
'release_simulate_mips64el': [
@@ -560,8 +574,12 @@
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_heap_sandbox': [
'debug_bot', 'x64', 'v8_enable_heap_sandbox'],
+ 'debug_x64_heap_sandbox_arm64_sim': [
+ 'debug_bot', 'simulate_arm64', 'v8_enable_heap_sandbox'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
+ 'debug_x64_non_default_cppgc': [
+ 'debug_bot', 'x64', 'non_default_cppgc'],
'debug_x64_perfetto': [
'debug_bot', 'x64', 'perfetto'],
'debug_x64_single_generation': [
@@ -606,6 +624,8 @@
'release_trybot', 'x86', 'gcmole'],
'release_x86_minimal_symbols': [
'release_bot', 'x86', 'minimal_symbols'],
+ 'release_x86_minimal_symbols_reclient': [
+ 'release_bot_reclient', 'x86', 'minimal_symbols'],
'release_x86_no_i18n_trybot': [
'release_trybot', 'x86', 'v8_no_i18n'],
'release_x86_predictable': [
@@ -779,12 +799,16 @@
'gn_args': 'use_sysroot=false',
},
+ 'non_default_cppgc': {
+ 'gn_args': 'cppgc_enable_object_names=true cppgc_enable_young_generation=true',
+ },
+
'perfetto': {
'gn_args': 'v8_use_perfetto=true',
},
'reclient': {
- 'gn_args': 'use_rbe=true',
+ 'gn_args': 'use_rbe=true use_remoteexec=true',
},
'release': {
@@ -823,6 +847,10 @@
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
},
+ 'simulate_loong64': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="loong64"',
+ },
+
'simulate_mipsel': {
'gn_args':
'target_cpu="x86" v8_target_cpu="mipsel" mips_arch_variant="r2"',
diff --git a/chromium/v8/infra/testing/builders.pyl b/chromium/v8/infra/testing/builders.pyl
index f37c66ba90a..f17f6512129 100644
--- a/chromium/v8/infra/testing/builders.pyl
+++ b/chromium/v8/infra/testing/builders.pyl
@@ -42,7 +42,6 @@
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 10},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
],
},
##############################################################################
@@ -286,7 +285,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 7},
{'name': 'v8testing', 'variant': 'extra', 'shards': 7},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'v8_linux_arm_lite_rel_ng_triggered': {
@@ -307,7 +305,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
##############################################################################
@@ -335,6 +332,15 @@
{'name': 'v8testing', 'shards': 3},
],
},
+ 'v8_linux64_cppgc_non_default_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'v8_linux64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -355,6 +361,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_dict_tracking_dbg_ng_triggered': {
@@ -396,8 +403,6 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
@@ -461,6 +466,7 @@
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_perfetto_dbg_ng_triggered': {
@@ -506,6 +512,7 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_tsan_rel_ng_triggered': {
@@ -576,7 +583,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
'v8_linux_arm64_gc_stress_dbg_ng_triggered': {
@@ -587,6 +593,14 @@
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12},
],
},
+ 'v8_linux_arm64_sim_heap_sandbox_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 14},
+ ],
+ },
'v8_linux_arm64_rel_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -597,7 +611,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
'v8_linux_arm64_cfi_rel_ng_triggered': {
@@ -618,6 +631,16 @@
],
},
##############################################################################
+ # Linux64 with Loongson simulators
+ 'v8_linux64_loong64_rel_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
+ ##############################################################################
# Linux64 with RISC-V simulators
'v8_linux64_riscv64_rel_ng_triggered': {
'swarming_dimensions': {
@@ -1125,6 +1148,7 @@
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1186,6 +1210,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1229,12 +1254,19 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
+ 'V8 Linux64 - cppgc-non-default - debug': {
+ 'swarming_dimensions': {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux64 - debug - perfetto': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1284,8 +1316,6 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
@@ -1627,7 +1657,6 @@
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 10},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
],
},
'V8 Arm': {
@@ -1717,7 +1746,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 6},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'trusted'},
# Armv8-a.
{
'name': 'mozilla',
@@ -1766,7 +1794,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
# Armv8-a.
{
'name': 'mozilla',
@@ -1835,7 +1862,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
- {'name': 'v8testing', 'variant': 'trusted'},
],
},
'V8 Linux - arm64 - sim - debug': {
@@ -1852,7 +1878,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 11},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'V8 Linux - arm64 - sim - gc stress': {
@@ -1872,6 +1897,32 @@
},
],
},
+ 'V8 Linux64 - arm64 - sim - heap sandbox - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 7200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 14},
+ ],
+ },
+ 'V8 Linux - loong64 - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux - mips64el - sim': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
@@ -1999,6 +2050,11 @@
},
{
'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=2100', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
'suffix': 'combined',
'test_args': [
'--total-timeout-sec=2100',
@@ -2009,6 +2065,7 @@
'--stress-marking=4',
'--stress-scavenge=4',
'--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
],
'shards': 4
},
@@ -2048,6 +2105,11 @@
},
{
'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=2100', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
'suffix': 'combined',
'test_args': [
'--total-timeout-sec=2100',
@@ -2058,6 +2120,7 @@
'--stress-marking=4',
'--stress-scavenge=4',
'--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
],
'shards': 3
},
@@ -2074,4 +2137,115 @@
},
],
},
+ 'v8_numfuzz_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--total-timeout-sec=900', '--stress-deopt=1']
+ },
+ ],
+ },
+ 'v8_numfuzz_tsan_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--total-timeout-sec=900', '--stress-marking=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--total-timeout-sec=900', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=900', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1']
+ },
+ ],
+ },
+ 'v8_numfuzz_dbg_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--total-timeout-sec=900', '--stress-marking=1'],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--total-timeout-sec=900', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=900', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--total-timeout-sec=900', '--stress-deopt=1'],
+ },
+ ],
+ },
}
diff --git a/chromium/v8/samples/cppgc/cppgc-sample.cc b/chromium/v8/samples/cppgc/hello-world.cc
index d76c16a5536..d76c16a5536 100644
--- a/chromium/v8/samples/cppgc/cppgc-sample.cc
+++ b/chromium/v8/samples/cppgc/hello-world.cc
diff --git a/chromium/v8/samples/hello-world.cc b/chromium/v8/samples/hello-world.cc
index 6e506475e45..92436e01773 100644
--- a/chromium/v8/samples/hello-world.cc
+++ b/chromium/v8/samples/hello-world.cc
@@ -7,7 +7,12 @@
#include <string.h>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
int main(int argc, char* argv[]) {
// Initialize V8.
diff --git a/chromium/v8/samples/process.cc b/chromium/v8/samples/process.cc
index 16e70a20644..28b6f119c3a 100644
--- a/chromium/v8/samples/process.cc
+++ b/chromium/v8/samples/process.cc
@@ -25,16 +25,29 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <include/v8.h>
-
-#include <include/libplatform/libplatform.h>
-
#include <stdlib.h>
#include <string.h>
#include <map>
#include <string>
+#include "include/libplatform/libplatform.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-external.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
+#include "include/v8-snapshot.h"
+#include "include/v8-template.h"
+#include "include/v8-value.h"
+
using std::map;
using std::pair;
using std::string;
diff --git a/chromium/v8/samples/shell.cc b/chromium/v8/samples/shell.cc
index 7de600a88fd..ab8abeb71e3 100644
--- a/chromium/v8/samples/shell.cc
+++ b/chromium/v8/samples/shell.cc
@@ -25,16 +25,21 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <include/v8.h>
-
-#include <include/libplatform/libplatform.h>
-
#include <assert.h>
#include <fcntl.h>
+#include <include/libplatform/libplatform.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-script.h"
+#include "include/v8-template.h"
+
/**
 * This sample program shows how to implement a simple JavaScript shell
* based on V8. This includes initializing V8 with command line options,
@@ -376,8 +381,8 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
stack_trace_string->IsString() &&
stack_trace_string.As<v8::String>()->Length() > 0) {
v8::String::Utf8Value stack_trace(isolate, stack_trace_string);
- const char* stack_trace_string = ToCString(stack_trace);
- fprintf(stderr, "%s\n", stack_trace_string);
+ const char* err = ToCString(stack_trace);
+ fprintf(stderr, "%s\n", err);
}
}
}
diff --git a/chromium/v8/src/DEPS b/chromium/v8/src/DEPS
index b3fcddf2f43..341435e28d3 100644
--- a/chromium/v8/src/DEPS
+++ b/chromium/v8/src/DEPS
@@ -52,6 +52,7 @@ include_rules = [
"+src/interpreter/setup-interpreter.h",
"-src/regexp",
"+src/regexp/regexp.h",
+ "+src/regexp/regexp-flags.h",
"+src/regexp/regexp-stack.h",
"+src/regexp/regexp-utils.h",
"-src/trap-handler",
@@ -65,6 +66,10 @@ include_rules = [
"+builtins-generated",
"+torque-generated",
"+starboard",
+ # Using cppgc inside v8 is not (yet) allowed.
+ "-include/cppgc",
+ "+include/cppgc/platform.h",
+ "+include/cppgc/source-location.h",
]
specific_include_rules = {
diff --git a/chromium/v8/src/api/api-arguments.h b/chromium/v8/src/api/api-arguments.h
index 464ebadf37b..98354757be6 100644
--- a/chromium/v8/src/api/api-arguments.h
+++ b/chromium/v8/src/api/api-arguments.h
@@ -5,6 +5,7 @@
#ifndef V8_API_API_ARGUMENTS_H_
#define V8_API_API_ARGUMENTS_H_
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
diff --git a/chromium/v8/src/api/api-inl.h b/chromium/v8/src/api/api-inl.h
index c5c774800b7..c033c3d2e8f 100644
--- a/chromium/v8/src/api/api-inl.h
+++ b/chromium/v8/src/api/api-inl.h
@@ -264,12 +264,12 @@ void CopyDoubleElementsToTypedBuffer(T* dst, uint32_t length,
}
}
-template <const CTypeInfo* type_info, typename T>
+template <CTypeInfo::Identifier type_info_id, typename T>
bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
uint32_t max_length) {
static_assert(
- std::is_same<
- T, typename i::CTypeInfoTraits<type_info->GetType()>::ctype>::value,
+ std::is_same<T, typename i::CTypeInfoTraits<
+ CTypeInfo(type_info_id).GetType()>::ctype>::value,
"Type mismatch between the expected CTypeInfo::Type and the destination "
"array");
@@ -299,11 +299,20 @@ bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
}
}
+// Deprecated; to be removed.
template <const CTypeInfo* type_info, typename T>
inline bool V8_EXPORT TryCopyAndConvertArrayToCppBuffer(Local<Array> src,
T* dst,
uint32_t max_length) {
- return CopyAndConvertArrayToCppBuffer<type_info, T>(src, dst, max_length);
+ return CopyAndConvertArrayToCppBuffer<type_info->GetId(), T>(src, dst,
+ max_length);
+}
+
+template <CTypeInfo::Identifier type_info_id, typename T>
+inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local<Array> src,
+ T* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<type_info_id, T>(src, dst, max_length);
}
namespace internal {
diff --git a/chromium/v8/src/api/api-natives.h b/chromium/v8/src/api/api-natives.h
index fb59eb6cfcd..38a8a7b9171 100644
--- a/chromium/v8/src/api/api-natives.h
+++ b/chromium/v8/src/api/api-natives.h
@@ -5,7 +5,7 @@
#ifndef V8_API_API_NATIVES_H_
#define V8_API_API_NATIVES_H_
-#include "include/v8.h"
+#include "include/v8-template.h"
#include "src/base/macros.h"
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
diff --git a/chromium/v8/src/api/api.cc b/chromium/v8/src/api/api.cc
index a8af304a530..93fa70ae1bb 100644
--- a/chromium/v8/src/api/api.cc
+++ b/chromium/v8/src/api/api.cc
@@ -7,16 +7,24 @@
#include <algorithm> // For min
#include <cmath> // For isnan.
#include <limits>
+#include <sstream>
#include <string>
#include <utility> // For move
#include <vector>
-#include "include/cppgc/custom-space.h"
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
+#include "include/v8-date.h"
+#include "include/v8-extension.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-locker.h"
+#include "include/v8-primitive-object.h"
#include "include/v8-profiler.h"
#include "include/v8-unwinder-state.h"
#include "include/v8-util.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/base/functional.h"
@@ -56,6 +64,7 @@
#include "src/init/icu_util.h"
#include "src/init/startup-data-util.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
#include "src/logging/counters-scopes.h"
@@ -99,7 +108,6 @@
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/tick-sample.h"
-#include "src/regexp/regexp-stack.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
@@ -177,6 +185,49 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
return origin;
}
+ScriptOrigin::ScriptOrigin(
+ Local<Value> resource_name, Local<Integer> line_offset,
+ Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
+ Local<Integer> script_id, Local<Value> source_map_url,
+ Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : ScriptOrigin(
+ Isolate::GetCurrent(), resource_name,
+ line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
+ column_offset.IsEmpty() ? 0
+ : static_cast<int>(column_offset->Value()),
+ !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
+ static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
+ source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
+ !is_wasm.IsEmpty() && is_wasm->IsTrue(),
+ !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
+
+ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
+ int column_offset, bool is_shared_cross_origin,
+ int script_id, Local<Value> source_map_url,
+ bool is_opaque, bool is_wasm, bool is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : isolate_(Isolate::GetCurrent()),
+ resource_name_(resource_name),
+ resource_line_offset_(line_offset),
+ resource_column_offset_(column_offset),
+ options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
+Local<Integer> ScriptOrigin::ResourceLineOffset() const {
+ return v8::Integer::New(isolate_, resource_line_offset_);
+}
+
+Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
+ return v8::Integer::New(isolate_, resource_column_offset_);
+}
+
+Local<Integer> ScriptOrigin::ScriptID() const {
+ return v8::Integer::New(isolate_, script_id_);
+}
+
// --- E x c e p t i o n B e h a v i o r ---
void i::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location) {
@@ -331,6 +382,37 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// ArrayBufferAllocator to use when the virtual memory cage is enabled, in which
+// case all ArrayBuffer backing stores need to be allocated inside the data
+// cage. Note that the current implementation is extremely inefficient, as it
+// uses the BoundedPageAllocator. In the future, we'll need a proper allocator
+// implementation.
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ ArrayBufferAllocator() { CHECK(page_allocator_); }
+
+ void* Allocate(size_t length) override {
+ return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_),
+ page_size_,
+ PageAllocator::kReadWrite);
+ }
+
+ void* AllocateUninitialized(size_t length) override {
+ return Allocate(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ page_allocator_->FreePages(data, RoundUp(length, page_size_));
+ }
+
+ private:
+ PageAllocator* page_allocator_ = internal::GetArrayBufferPageAllocator();
+ const size_t page_size_ = page_allocator_->AllocatePageSize();
+};
+
+#else
+
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
void* Allocate(size_t length) override {
@@ -372,6 +454,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return new_data;
}
};
+#endif // V8_VIRTUAL_MEMORY_CAGE
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
@@ -746,9 +829,17 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
}
}
-i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
- LOG_API(isolate, Persistent, New);
- i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
+namespace api_internal {
+i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
+ internal::Address* slot,
+ bool has_destructor) {
+ LOG_API(isolate, TracedGlobal, New);
+#ifdef DEBUG
+ Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
+ "the address slot must be not null");
+#endif
+ i::Handle<i::Object> result =
+ isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -757,16 +848,9 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
return result.location();
}
-i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
- internal::Address* slot,
- bool has_destructor) {
- LOG_API(isolate, TracedGlobal, New);
-#ifdef DEBUG
- Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
- "the address slot must be not null");
-#endif
- i::Handle<i::Object> result =
- isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
+i::Address* GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
+ LOG_API(isolate, Persistent, New);
+ i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -775,59 +859,38 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
return result.location();
}
-i::Address* V8::CopyGlobalReference(i::Address* from) {
+i::Address* CopyGlobalReference(i::Address* from) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
}
-void V8::MoveGlobalReference(internal::Address** from, internal::Address** to) {
+void MoveGlobalReference(internal::Address** from, internal::Address** to) {
i::GlobalHandles::MoveGlobal(from, to);
}
-void V8::MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to) {
- i::GlobalHandles::MoveTracedGlobal(from, to);
-}
-
-void V8::CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to) {
- i::GlobalHandles::CopyTracedGlobal(from, to);
-}
-
-void V8::MakeWeak(i::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback weak_callback,
- WeakCallbackType type) {
+void MakeWeak(i::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type) {
i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-void V8::MakeWeak(i::Address** location_addr) {
+void MakeWeak(i::Address** location_addr) {
i::GlobalHandles::MakeWeak(location_addr);
}
-void* V8::ClearWeak(i::Address* location) {
+void* ClearWeak(i::Address* location) {
return i::GlobalHandles::ClearWeakness(location);
}
-void V8::AnnotateStrongRetainer(i::Address* location, const char* label) {
+void AnnotateStrongRetainer(i::Address* location, const char* label) {
i::GlobalHandles::AnnotateStrongRetainer(location, label);
}
-void V8::DisposeGlobal(i::Address* location) {
+void DisposeGlobal(i::Address* location) {
i::GlobalHandles::Destroy(location);
}
-void V8::DisposeTracedGlobal(internal::Address* location) {
- i::GlobalHandles::DestroyTraced(location);
-}
-
-void V8::SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
- callback);
-}
-
-Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
+Value* Eternalize(Isolate* v8_isolate, Value* value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Object object = *Utils::OpenHandle(value);
int index = -1;
@@ -836,20 +899,42 @@ Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
isolate->eternal_handles()->Get(index).location());
}
-void V8::FromJustIsNothing() {
+void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to) {
+ i::GlobalHandles::MoveTracedGlobal(from, to);
+}
+
+void CopyTracedGlobalReference(const internal::Address* const* from,
+ internal::Address** to) {
+ i::GlobalHandles::CopyTracedGlobal(from, to);
+}
+
+void DisposeTracedGlobal(internal::Address* location) {
+ i::GlobalHandles::DestroyTraced(location);
+}
+
+void SetFinalizationCallbackTraced(internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
+ callback);
+}
+
+void FromJustIsNothing() {
Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing.");
}
-void V8::ToLocalEmpty() {
+void ToLocalEmpty() {
Utils::ApiCheck(false, "v8::ToLocalChecked", "Empty MaybeLocal.");
}
-void V8::InternalFieldOutOfBounds(int index) {
+void InternalFieldOutOfBounds(int index) {
Utils::ApiCheck(0 <= index && index < kInternalFieldsInWeakCallback,
"WeakCallbackInfo::GetInternalField",
"Internal field out of bounds.");
}
+} // namespace api_internal
+
// --- H a n d l e s ---
HandleScope::HandleScope(Isolate* isolate) { Initialize(isolate); }
@@ -862,7 +947,7 @@ void HandleScope::Initialize(Isolate* isolate) {
// We make an exception if the serializer is enabled, which means that the
// Isolate is exclusively used to create a snapshot.
Utils::ApiCheck(
- !v8::Locker::IsActive() ||
+ !v8::Locker::WasEverUsed() ||
internal_isolate->thread_manager()->IsLockedByCurrentThread() ||
internal_isolate->serializer_enabled(),
"HandleScope::HandleScope",
@@ -2387,42 +2472,44 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
- std::unique_ptr<i::AlignedCachedData> cached_data;
- if (options == kConsumeCodeCache) {
- if (source->consume_cache_task) {
- // If there's a cache consume task, finish it
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- source->consume_cache_task->impl_->Finish(isolate, str,
- source->resource_options);
- i::Handle<i::SharedFunctionInfo> result;
- if (maybe_function_info.ToHandle(&result)) {
- RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
- }
- // If the above failed, then we must have rejected the cache. Continue
- // with normal compilation, disabling the code cache consumption.
- source->cached_data->rejected = true;
- options = kNoCompileOptions;
- } else {
- DCHECK(source->cached_data);
- // AlignedCachedData takes care of pointer-aligning the data.
- cached_data.reset(new i::AlignedCachedData(source->cached_data->data,
- source->cached_data->length));
- }
- }
-
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
i::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
source->host_defined_options, source->resource_options);
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, script_details, nullptr, cached_data.get(), options,
- no_cache_reason, i::NOT_NATIVES_CODE);
+
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info;
if (options == kConsumeCodeCache) {
- source->cached_data->rejected = cached_data->rejected();
+ if (source->consume_cache_task) {
+ // Take ownership of the internal deserialization task and clear it off
+ // the consume task on the source.
+ DCHECK_NOT_NULL(source->consume_cache_task->impl_);
+ std::unique_ptr<i::BackgroundDeserializeTask> deserialize_task =
+ std::move(source->consume_cache_task->impl_);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ isolate, str, script_details, deserialize_task.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = deserialize_task->rejected();
+ } else {
+ DCHECK(source->cached_data);
+ // AlignedCachedData takes care of pointer-aligning the data.
+ auto cached_data = std::make_unique<i::AlignedCachedData>(
+ source->cached_data->data, source->cached_data->length);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, script_details, cached_data.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = cached_data->rejected();
+ }
+ } else {
+ // Compile without any cache.
+ maybe_function_info = i::Compiler::GetSharedFunctionInfoForScript(
+ isolate, str, script_details, options, no_cache_reason,
+ i::NOT_NATIVES_CODE);
}
+
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
@@ -2446,7 +2533,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
!source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
auto isolate = context->GetIsolate();
- auto maybe =
+ MaybeLocal<UnboundScript> maybe =
CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> result;
if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
@@ -2463,11 +2550,10 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
- auto maybe =
+ MaybeLocal<UnboundScript> maybe =
CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
-
i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
return ToApiHandle<Module>(i_isolate->factory()->NewSourceTextModule(shared));
@@ -2726,7 +2812,7 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
has_terminated_(false) {
ResetInternal();
// Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ = reinterpret_cast<void*>(
+ js_stack_comparable_address_ = static_cast<internal::Address>(
i::SimulatorStack::RegisterJSStackComparableAddress(isolate_));
isolate_->RegisterTryCatchHandler(this);
}
@@ -3009,6 +3095,14 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ std::ostringstream stack_trace_stream;
+ i_isolate->PrintCurrentStackTrace(stack_trace_stream);
+ i::PrintF(out, "%s", stack_trace_stream.str().c_str());
+}
+
+void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->PrintCurrentStackTrace(out);
}
@@ -3275,7 +3369,6 @@ struct ValueDeserializer::PrivateData {
: isolate(i), deserializer(i, data, delegate) {}
i::Isolate* isolate;
i::ValueDeserializer deserializer;
- bool has_aborted = false;
bool supports_legacy_wire_format = false;
};
@@ -3285,16 +3378,8 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
size_t size, Delegate* delegate) {
- if (base::IsValueInRangeForNumericType<int>(size)) {
- private_ = new PrivateData(
- reinterpret_cast<i::Isolate*>(isolate),
- base::Vector<const uint8_t>(data, static_cast<int>(size)), delegate);
- } else {
- private_ =
- new PrivateData(reinterpret_cast<i::Isolate*>(isolate),
- base::Vector<const uint8_t>(nullptr, 0), nullptr);
- private_->has_aborted = true;
- }
+ private_ = new PrivateData(reinterpret_cast<i::Isolate*>(isolate),
+ base::Vector<const uint8_t>(data, size), delegate);
}
ValueDeserializer::~ValueDeserializer() { delete private_; }
@@ -3304,15 +3389,6 @@ Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
ENTER_V8_NO_SCRIPT(isolate, context, ValueDeserializer, ReadHeader,
Nothing<bool>(), i::HandleScope);
- // We could have aborted during the constructor.
- // If so, ReadHeader is where we report it.
- if (private_->has_aborted) {
- isolate->Throw(*isolate->factory()->NewError(
- i::MessageTemplate::kDataCloneDeserializationError));
- has_pending_exception = true;
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- }
-
bool read_header = false;
has_pending_exception = !private_->deserializer.ReadHeader().To(&read_header);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
@@ -3336,12 +3412,10 @@ void ValueDeserializer::SetSupportsLegacyWireFormat(
}
uint32_t ValueDeserializer::GetWireFormatVersion() const {
- CHECK(!private_->has_aborted);
return private_->deserializer.GetWireFormatVersion();
}
MaybeLocal<Value> ValueDeserializer::ReadValue(Local<Context> context) {
- CHECK(!private_->has_aborted);
PREPARE_FOR_EXECUTION(context, ValueDeserializer, ReadValue, Value);
i::MaybeHandle<i::Object> result;
if (GetWireFormatVersion() > 0) {
@@ -3358,14 +3432,12 @@ MaybeLocal<Value> ValueDeserializer::ReadValue(Local<Context> context) {
void ValueDeserializer::TransferArrayBuffer(uint32_t transfer_id,
Local<ArrayBuffer> array_buffer) {
- CHECK(!private_->has_aborted);
private_->deserializer.TransferArrayBuffer(transfer_id,
Utils::OpenHandle(*array_buffer));
}
void ValueDeserializer::TransferSharedArrayBuffer(
uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
- CHECK(!private_->has_aborted);
private_->deserializer.TransferArrayBuffer(
transfer_id, Utils::OpenHandle(*shared_array_buffer));
}
@@ -5530,12 +5602,13 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
int end = start + length;
if ((length == -1) || (length > str->length() - start)) end = str->length();
if (end < 0) return 0;
- if (start < end) i::String::WriteToFlat(*str, buffer, start, end);
+ int write_length = end - start;
+ if (start < end) i::String::WriteToFlat(*str, buffer, start, write_length);
if (!(options & String::NO_NULL_TERMINATION) &&
- (length == -1 || end - start < length)) {
- buffer[end - start] = '\0';
+ (length == -1 || write_length < length)) {
+ buffer[write_length] = '\0';
}
- return end - start;
+ return write_length;
}
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
@@ -5852,6 +5925,12 @@ void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool v8::V8::InitializeVirtualMemoryCage() {
+ return i::V8::InitializeVirtualMemoryCage();
+}
+#endif
+
void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
bool v8::V8::Initialize(const int build_config) {
@@ -5882,6 +5961,16 @@ bool v8::V8::Initialize(const int build_config) {
V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
}
+ const bool kEmbedderVirtualMemoryCage =
+ (build_config & kVirtualMemoryCage) != 0;
+ if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) {
+ FATAL(
+ "Embedder-vs-V8 build configuration mismatch. On embedder side "
+ "virtual memory cage is %s while on V8 side it's %s.",
+ kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED",
+ V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED");
+ }
+
i::V8::Initialize();
return true;
}
@@ -5998,6 +6087,21 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() {
+ CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
+ return i::GetProcessWideVirtualMemoryCage()->page_allocator();
+}
+
+size_t v8::V8::GetVirtualMemoryCageSizeInBytes() {
+ if (!i::GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ return 0;
+ } else {
+ return i::GetProcessWideVirtualMemoryCage()->size();
+ }
+}
+#endif
+
void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
@@ -6268,7 +6372,7 @@ void Context::DetachGlobal() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->bootstrapper()->DetachGlobal(context);
+ isolate->DetachGlobal(context);
}
Local<v8::Object> Context::GetExtrasBindingObject() {
@@ -7067,7 +7171,7 @@ REGEXP_FLAG_ASSERT_EQ(kLinear);
v8::RegExp::Flags v8::RegExp::GetFlags() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return RegExp::Flags(static_cast<int>(obj->GetFlags()));
+ return RegExp::Flags(static_cast<int>(obj->flags()));
}
MaybeLocal<v8::Object> v8::RegExp::Exec(Local<Context> context,
@@ -8435,12 +8539,6 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
}
void Isolate::SetHostImportModuleDynamicallyCallback(
- i::Isolate::DeprecatedHostImportModuleDynamicallyCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->SetHostImportModuleDynamicallyCallback(callback);
-}
-
-void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetHostImportModuleDynamicallyCallback(callback);
@@ -8938,7 +9036,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
bool on_isolate_thread =
- v8::Locker::IsActive()
+ v8::Locker::WasEverUsed()
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current() == isolate->thread_id();
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
@@ -9080,6 +9178,10 @@ CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
wasm_exceptions_enabled_callback)
+CALLBACK_SETTER(WasmDynamicTieringEnabledCallback,
+ WasmDynamicTieringEnabledCallback,
+ wasm_dynamic_tiering_enabled_callback)
+
CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
SharedArrayBufferConstructorEnabledCallback,
sharedarraybuffer_constructor_enabled_callback)
@@ -9223,7 +9325,7 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#ifdef V8_INTL_SUPPORT
i_isolate->ResetDefaultLocale();
- i_isolate->ClearCachedIcuObjects();
+ i_isolate->clear_cached_icu_objects();
#endif // V8_INTL_SUPPORT
}
@@ -10090,7 +10192,7 @@ void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
UNREACHABLE();
}
-void WasmStreaming::Finish() { UNREACHABLE(); }
+void WasmStreaming::Finish(bool can_use_compiled_module) { UNREACHABLE(); }
void WasmStreaming::Abort(MaybeLocal<Value> exception) { UNREACHABLE(); }
@@ -10336,6 +10438,46 @@ bool ConvertDouble(double d) {
} // namespace internal
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kInt32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ int32_t>(src, dst, max_length);
+}
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<uint32_t>::Build().GetId(), uint32_t>(
+ Local<Array> src, uint32_t* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kUint32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ uint32_t>(src, dst, max_length);
+}
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<float>::Build().GetId(), float>(
+ Local<Array> src, float* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kFloat32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ float>(src, dst, max_length);
+}
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<double>::Build().GetId(), double>(
+ Local<Array> src, double* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kFloat64, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ double>(src, dst, max_length);
+}
+
} // namespace v8
#undef TRACE_BS
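
As a rough usage sketch for the TryToCopyAndConvertArrayToCppBuffer specializations added above (illustrative only; `array` is assumed to be an existing v8::Local<v8::Array> of numbers, and the surrounding Isolate/HandleScope setup is omitted):

    int32_t buffer[16];
    bool ok = v8::TryToCopyAndConvertArrayToCppBuffer<
        v8::internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
        array, buffer, /*max_length=*/16);
    if (!ok) {
      // Conversion failed (too many elements or non-convertible values);
      // fall back to the regular (slow) API path.
    }
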
diff --git a/chromium/v8/src/api/api.h b/chromium/v8/src/api/api.h
index 7d2a0c3e9cf..c255dad1e64 100644
--- a/chromium/v8/src/api/api.h
+++ b/chromium/v8/src/api/api.h
@@ -7,6 +7,11 @@
#include <memory>
+#include "include/v8-container.h"
+#include "include/v8-external.h"
+#include "include/v8-proxy.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/bigint.h"
@@ -18,12 +23,16 @@
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/source-text-module.h"
-#include "src/utils/detachable-vector.h"
-
#include "src/objects/templates.h"
+#include "src/utils/detachable-vector.h"
namespace v8 {
+class AccessorSignature;
+class Extension;
+class Signature;
+class Template;
+
namespace internal {
class JSArrayBufferView;
class JSFinalizationRegistry;
@@ -33,7 +42,7 @@ namespace debug {
class AccessorPair;
class GeneratorObject;
class Script;
-class WeakMap;
+class EphemeronTable;
} // namespace debug
// Constants used in the implementation of the API. The most natural thing
@@ -126,7 +135,7 @@ class RegisteredExtension {
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
- V(debug::WeakMap, JSWeakMap) \
+ V(debug::EphemeronTable, EphemeronHashTable) \
V(debug::AccessorPair, AccessorPair) \
V(Promise, JSPromise) \
V(Primitive, Object) \
diff --git a/chromium/v8/src/asmjs/asm-parser.cc b/chromium/v8/src/asmjs/asm-parser.cc
index 8babca7a3b0..b6743117fe2 100644
--- a/chromium/v8/src/asmjs/asm-parser.cc
+++ b/chromium/v8/src/asmjs/asm-parser.cc
@@ -698,7 +698,8 @@ void AsmJsParser::ValidateFunctionTable() {
FAIL("Function table definition doesn't match use");
}
module_builder_->SetIndirectFunction(
- static_cast<uint32_t>(table_info->index + count), info->index);
+ 0, static_cast<uint32_t>(table_info->index + count), info->index,
+ WasmModuleBuilder::WasmElemSegment::kRelativeToDeclaredFunctions);
}
++count;
if (Check(',')) {
@@ -962,7 +963,6 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
if (Check('-')) {
negate = true;
}
- double dvalue = 0.0;
if (CheckForDouble(&dvalue)) {
info->kind = VarKind::kLocal;
info->type = AsmType::Float();
@@ -1670,9 +1670,9 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
uint32_t uvalue;
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
if (Check('*')) {
- AsmType* a;
- RECURSEn(a = UnaryExpression());
- if (!a->IsA(AsmType::Int())) {
+ AsmType* type;
+ RECURSEn(type = UnaryExpression());
+ if (!type->IsA(AsmType::Int())) {
FAILn("Expected int");
}
int32_t value = static_cast<int32_t>(uvalue);
@@ -1688,9 +1688,9 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
int32_t value = -static_cast<int32_t>(uvalue);
current_function_builder_->EmitI32Const(value);
if (Check('*')) {
- AsmType* a;
- RECURSEn(a = UnaryExpression());
- if (!a->IsA(AsmType::Int())) {
+ AsmType* type;
+ RECURSEn(type = UnaryExpression());
+ if (!type->IsA(AsmType::Int())) {
FAILn("Expected int");
}
current_function_builder_->Emit(kExprI32Mul);
@@ -1706,7 +1706,6 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
for (;;) {
if (Check('*')) {
- uint32_t uvalue;
if (Check('-')) {
if (!PeekForZero() && CheckForUnsigned(&uvalue)) {
if (uvalue >= 0x100000) {
@@ -2114,7 +2113,7 @@ AsmType* AsmJsParser::ValidateCall() {
// both cases we might be seeing the {function_name} for the first time and
// hence allocate a {VarInfo} here, all subsequent uses of the same name then
// need to match the information stored at this point.
- base::Optional<TemporaryVariableScope> tmp;
+ base::Optional<TemporaryVariableScope> tmp_scope;
if (Check('[')) {
AsmType* index = nullptr;
RECURSEn(index = EqualityExpression());
@@ -2134,13 +2133,16 @@ AsmType* AsmJsParser::ValidateCall() {
EXPECT_TOKENn(']');
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
- uint32_t index = module_builder_->AllocateIndirectFunctions(mask + 1);
- if (index == std::numeric_limits<uint32_t>::max()) {
+ if (module_builder_->NumTables() == 0) {
+ module_builder_->AddTable(kWasmFuncRef, 0);
+ }
+ uint32_t func_index = module_builder_->IncreaseTableMinSize(0, mask + 1);
+ if (func_index == std::numeric_limits<uint32_t>::max()) {
FAILn("Exceeded maximum function table size");
}
function_info->kind = VarKind::kTable;
function_info->mask = mask;
- function_info->index = index;
+ function_info->index = func_index;
function_info->mutable_variable = false;
} else {
if (function_info->kind != VarKind::kTable) {
@@ -2153,8 +2155,8 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->EmitI32Const(function_info->index);
current_function_builder_->Emit(kExprI32Add);
// We have to use a temporary for the correct order of evaluation.
- tmp.emplace(this);
- current_function_builder_->EmitSetLocal(tmp->get());
+ tmp_scope.emplace(this);
+ current_function_builder_->EmitSetLocal(tmp_scope->get());
// The position of function table calls is after the table lookup.
call_pos = scanner_.Position();
} else {
@@ -2390,7 +2392,7 @@ AsmType* AsmJsParser::ValidateCall() {
}
}
if (function_info->kind == VarKind::kTable) {
- current_function_builder_->EmitGetLocal(tmp->get());
+ current_function_builder_->EmitGetLocal(tmp_scope->get());
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitU32V(signature_index);
diff --git a/chromium/v8/src/ast/ast.cc b/chromium/v8/src/ast/ast.cc
index cf57b9e9b78..ac89df574d8 100644
--- a/chromium/v8/src/ast/ast.cc
+++ b/chromium/v8/src/ast/ast.cc
@@ -529,9 +529,10 @@ int ArrayLiteral::InitDepthAndFlags() {
int array_index = 0;
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
- MaterializedLiteral* literal = element->AsMaterializedLiteral();
- if (literal != nullptr) {
- int subliteral_depth = literal->InitDepthAndFlags() + 1;
+ MaterializedLiteral* materialized_literal =
+ element->AsMaterializedLiteral();
+ if (materialized_literal != nullptr) {
+ int subliteral_depth = materialized_literal->InitDepthAndFlags() + 1;
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
diff --git a/chromium/v8/src/ast/prettyprinter.cc b/chromium/v8/src/ast/prettyprinter.cc
index fb3690164d4..44f4ea155f8 100644
--- a/chromium/v8/src/ast/prettyprinter.cc
+++ b/chromium/v8/src/ast/prettyprinter.cc
@@ -13,6 +13,7 @@
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
@@ -72,6 +73,12 @@ void CallPrinter::Find(AstNode* node, bool print) {
}
}
+void CallPrinter::Print(char c) {
+ if (!found_ || done_) return;
+ num_prints_++;
+ builder_->AppendCharacter(c);
+}
+
void CallPrinter::Print(const char* str) {
if (!found_ || done_) return;
num_prints_++;
@@ -269,13 +276,10 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
PrintLiteral(node->pattern(), false);
Print("/");
- if (node->flags() & RegExp::kHasIndices) Print("d");
- if (node->flags() & RegExp::kGlobal) Print("g");
- if (node->flags() & RegExp::kIgnoreCase) Print("i");
- if (node->flags() & RegExp::kLinear) Print("l");
- if (node->flags() & RegExp::kMultiline) Print("m");
- if (node->flags() & RegExp::kUnicode) Print("u");
- if (node->flags() & RegExp::kSticky) Print("y");
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) Print(Char);
+ REGEXP_FLAG_LIST(V)
+#undef V
}
@@ -342,17 +346,12 @@ void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->target());
if (node->target()->IsArrayLiteral()) {
// Special case the visit for destructuring array assignment.
- bool was_found = false;
if (node->value()->position() == position_) {
is_iterator_error_ = true;
was_found = !found_;
found_ = true;
}
Find(node->value(), true);
- if (was_found) {
- done_ = true;
- found_ = false;
- }
} else {
Find(node->value());
}
@@ -963,7 +962,7 @@ void AstPrinter::VisitWithStatement(WithStatement* node) {
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH", node->position());
+ IndentedScope switch_indent(this, "SWITCH", node->position());
PrintIndentedVisit("TAG", node->tag());
for (CaseClause* clause : *node->cases()) {
if (clause->is_default()) {
@@ -1189,13 +1188,10 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
PrintLiteralIndented("PATTERN", node->raw_pattern(), false);
int i = 0;
base::EmbeddedVector<char, 128> buf;
- if (node->flags() & RegExp::kHasIndices) buf[i++] = 'd';
- if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
- if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
- if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
- if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
- if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
- if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) buf[i++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
buf[i] = '\0';
PrintIndented("FLAGS ");
Print("%s", buf.begin());
@@ -1246,7 +1242,7 @@ void AstPrinter::PrintObjectProperties(
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL", node->position());
+ IndentedScope array_indent(this, "ARRAY LITERAL", node->position());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES", node->position());
for (int i = 0; i < node->values()->length(); i++) {
diff --git a/chromium/v8/src/ast/prettyprinter.h b/chromium/v8/src/ast/prettyprinter.h
index e26d98e7a39..a61c43e14e1 100644
--- a/chromium/v8/src/ast/prettyprinter.h
+++ b/chromium/v8/src/ast/prettyprinter.h
@@ -29,7 +29,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
// The following routine prints the node with position |position| into a
// string.
Handle<String> Print(FunctionLiteral* program, int position);
- enum ErrorHint {
+ enum class ErrorHint {
kNone,
kNormalIterator,
kAsyncIterator,
@@ -52,6 +52,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
#undef DECLARE_VISIT
private:
+ void Print(char c);
void Print(const char* str);
void Print(Handle<String> str);
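
The new Print(char) overload above exists so the REGEXP_FLAG_LIST X-macro in prettyprinter.cc can emit single flag characters. Roughly, the expansion works like this (the tuple shown is illustrative; the real list lives in src/regexp/regexp-flags.h):

    // For an entry such as V(global, Global, global, 'g', 1), the macro
    //   #define V(Lower, Camel, LowerCamel, Char, Bit) \
    //     if (node->flags() & RegExp::k##Camel) Print(Char);
    //   REGEXP_FLAG_LIST(V)
    //   #undef V
    // expands to:
    if (node->flags() & RegExp::kGlobal) Print('g');
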
diff --git a/chromium/v8/src/ast/scopes.cc b/chromium/v8/src/ast/scopes.cc
index 94782cab303..bf490a42bb9 100644
--- a/chromium/v8/src/ast/scopes.cc
+++ b/chromium/v8/src/ast/scopes.cc
@@ -566,7 +566,6 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Check if there's a conflict with a lexical declaration
Scope* query_scope = sloppy_block_function->scope()->outer_scope();
- Variable* var = nullptr;
bool should_hoist = true;
// It is not sufficient to just do a Lookup on query_scope: for
@@ -576,7 +575,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Don't use a generic cache scope, as the cache scope would be the outer
// scope and we terminate the iteration there anyway.
do {
- var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
+ Variable* var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
if (var != nullptr && IsLexicalVariableMode(var->mode())) {
should_hoist = false;
break;
@@ -840,12 +839,12 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) {
new_parent->sibling_ = top_inner_scope_;
}
- Scope* outer_scope_ = outer_scope_and_calls_eval_.GetPointer();
- new_parent->unresolved_list_.MoveTail(&outer_scope_->unresolved_list_,
+ Scope* outer_scope = outer_scope_and_calls_eval_.GetPointer();
+ new_parent->unresolved_list_.MoveTail(&outer_scope->unresolved_list_,
top_unresolved_);
// Move temporaries allocated for complex parameter initializers.
- DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
+ DeclarationScope* outer_closure = outer_scope->GetClosureScope();
for (auto it = top_local_; it != outer_closure->locals()->end(); ++it) {
Variable* local = *it;
DCHECK_EQ(VariableMode::kTemporary, local->mode());
@@ -2014,7 +2013,7 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
// scope when we get to it (we may still have deserialized scopes
// in-between the initial and cache scopes so we can't just check the
// cache before the loop).
- Variable* var = scope->variables_.Lookup(proxy->raw_name());
+ var = scope->variables_.Lookup(proxy->raw_name());
if (var != nullptr) return var;
}
var = scope->LookupInScopeInfo(proxy->raw_name(),
@@ -2063,7 +2062,7 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
// TODO(verwaest): Separate through AnalyzePartially.
if (mode == kParsedScope && !scope->scope_info_.is_null()) {
DCHECK_NULL(cache_scope);
- Scope* cache_scope = scope->GetNonEvalDeclarationScope();
+ cache_scope = scope->GetNonEvalDeclarationScope();
return Lookup<kDeserializedScope>(proxy, scope, outer_scope_end,
cache_scope);
}
diff --git a/chromium/v8/src/base/atomicops.h b/chromium/v8/src/base/atomicops.h
index 888157dc61f..20efe3479cc 100644
--- a/chromium/v8/src/base/atomicops.h
+++ b/chromium/v8/src/base/atomicops.h
@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
+inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
diff --git a/chromium/v8/src/base/bounded-page-allocator.cc b/chromium/v8/src/base/bounded-page-allocator.cc
index fa7b10324d8..e5f090682f4 100644
--- a/chromium/v8/src/base/bounded-page-allocator.cc
+++ b/chromium/v8/src/base/bounded-page-allocator.cc
@@ -7,13 +7,14 @@
namespace v8 {
namespace base {
-BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
- Address start, size_t size,
- size_t allocate_page_size)
+BoundedPageAllocator::BoundedPageAllocator(
+ v8::PageAllocator* page_allocator, Address start, size_t size,
+ size_t allocate_page_size, PageInitializationMode page_initialization_mode)
: allocate_page_size_(allocate_page_size),
commit_page_size_(page_allocator->CommitPageSize()),
page_allocator_(page_allocator),
- region_allocator_(start, size, allocate_page_size_) {
+ region_allocator_(start, size, allocate_page_size_),
+ page_initialization_mode_(page_initialization_mode) {
DCHECK_NOT_NULL(page_allocator);
DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
@@ -30,19 +31,30 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
PageAllocator::Permission access) {
MutexGuard guard(&mutex_);
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
-
- // Region allocator does not support alignments bigger than it's own
- // allocation alignment.
- DCHECK_LE(alignment, allocate_page_size_);
-
- // TODO(ishell): Consider using randomized version here.
- Address address = region_allocator_.AllocateRegion(size);
+ DCHECK(IsAligned(alignment, allocate_page_size_));
+
+ Address address;
+ if (alignment <= allocate_page_size_) {
+ // TODO(ishell): Consider using randomized version here.
+ address = region_allocator_.AllocateRegion(size);
+ } else {
+ // Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
+ // enabled, in which case a bounded page allocator is used to allocate WASM
+ // memory buffers, which have a larger alignment.
+ address = region_allocator_.AllocateAlignedRegion(size, alignment);
+ }
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
- return reinterpret_cast<void*>(address);
+
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return nullptr;
+ }
+
+ return ptr;
}
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
@@ -59,8 +71,13 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
}
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return false;
+ }
+
return true;
}
@@ -94,8 +111,17 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
- CHECK(page_allocator_->SetPermissions(raw_address, size,
- PageAllocator::kNoAccess));
+ if (page_initialization_mode_ ==
+ PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+ // When we are required to return zero-initialized pages, we decommit the
+ // pages here, which will cause any wired pages to be removed by the OS.
+ CHECK(page_allocator_->DecommitPages(raw_address, size));
+ } else {
+ DCHECK_EQ(page_initialization_mode_,
+ PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+ CHECK(page_allocator_->SetPermissions(raw_address, size,
+ PageAllocator::kNoAccess));
+ }
return true;
}
@@ -128,8 +154,18 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
- return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
- free_size, PageAllocator::kNoAccess);
+ if (page_initialization_mode_ ==
+ PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+ // See comment in FreePages().
+ return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+ free_size);
+ } else {
+ DCHECK_EQ(page_initialization_mode_,
+ PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+ return page_allocator_->SetPermissions(
+ reinterpret_cast<void*>(free_address), free_size,
+ PageAllocator::kNoAccess);
+ }
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
@@ -144,5 +180,9 @@ bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
return page_allocator_->DiscardSystemPages(address, size);
}
+bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
+ return page_allocator_->DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/chromium/v8/src/base/bounded-page-allocator.h b/chromium/v8/src/base/bounded-page-allocator.h
index 1c8c8467112..a98a2299f84 100644
--- a/chromium/v8/src/base/bounded-page-allocator.h
+++ b/chromium/v8/src/base/bounded-page-allocator.h
@@ -12,10 +12,23 @@
namespace v8 {
namespace base {
+// Defines the page initialization mode of a BoundedPageAllocator.
+enum class PageInitializationMode {
+ // The contents of allocated pages must be zero initialized. This causes any
+ // committed pages to be decommitted during FreePages and ReleasePages. This
+ // requires the embedder to provide the PageAllocator::DecommitPages API.
+ kAllocatedPagesMustBeZeroInitialized,
+ // Allocated pages do not have to be be zero initialized and can contain old
+ // data. This is slightly faster as comitted pages are not decommitted
+ // during FreePages and ReleasePages, but only made inaccessible.
+ kAllocatedPagesCanBeUninitialized,
+};
+
// This is a v8::PageAllocator implementation that allocates pages within the
// pre-reserved region of virtual space. This class requires the virtual space
// to be kept reserved during the lifetime of this object.
// The main application of bounded page allocator are
+// - the V8 virtual memory cage
// - V8 heap pointer compression which requires the whole V8 heap to be
// allocated within a contiguous range of virtual address space,
// - executable page allocation, which allows to use PC-relative 32-bit code
@@ -28,7 +41,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
using Address = uintptr_t;
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
- size_t size, size_t allocate_page_size);
+ size_t size, size_t allocate_page_size,
+ PageInitializationMode page_initialization_mode);
BoundedPageAllocator(const BoundedPageAllocator&) = delete;
BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
~BoundedPageAllocator() override = default;
@@ -71,12 +85,15 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
v8::base::Mutex mutex_;
const size_t allocate_page_size_;
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
+ const PageInitializationMode page_initialization_mode_;
};
} // namespace base
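
A hypothetical construction sketch for the extended constructor above (not taken from the patch; platform_allocator, reservation_start and reservation_size are assumed to describe a range already reserved elsewhere):

    v8::base::BoundedPageAllocator bounded(
        platform_allocator, reservation_start, reservation_size,
        platform_allocator->AllocatePageSize(),
        v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
    // With the zero-initialization mode, FreePages/ReleasePages decommit pages,
    // so the underlying platform_allocator must implement DecommitPages.
    void* pages = bounded.AllocatePages(nullptr, bounded.AllocatePageSize(),
                                        bounded.AllocatePageSize(),
                                        v8::PageAllocator::kReadWrite);
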
diff --git a/chromium/v8/src/base/build_config.h b/chromium/v8/src/base/build_config.h
index d7a0c9f3cf6..3303916776f 100644
--- a/chromium/v8/src/base/build_config.h
+++ b/chromium/v8/src/base/build_config.h
@@ -33,6 +33,9 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__loongarch64)
+#define V8_HOST_ARCH_LOONG64 1
+#define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_PPC64 1
#define V8_HOST_ARCH_64_BIT 1
@@ -83,7 +86,7 @@
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
- !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -128,6 +131,8 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_PPC64
@@ -171,6 +176,9 @@
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
#error Target architecture riscv64 is only supported on riscv64 and x64 host
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
+#error Target architecture loong64 is only supported on loong64 and x64 host
+#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -181,6 +189,8 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
diff --git a/chromium/v8/src/base/compiler-specific.h b/chromium/v8/src/base/compiler-specific.h
index f7e2e0e14d5..0c37e56afae 100644
--- a/chromium/v8/src/base/compiler-specific.h
+++ b/chromium/v8/src/base/compiler-specific.h
@@ -7,13 +7,15 @@
#include "include/v8config.h"
-// Annotate a using ALLOW_UNUSED_TYPE = or function indicating it's ok if it's
-// not used. Use like:
-// using Bar = Foo;
+// Annotation to silence compiler warnings about unused
+// types/functions/variables. Use like:
+//
+// using V8_ALLOW_UNUSED Bar = Foo;
+// V8_ALLOW_UNUSED void foo() {}
#if V8_HAS_ATTRIBUTE_UNUSED
-#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#define V8_ALLOW_UNUSED __attribute__((unused))
#else
-#define ALLOW_UNUSED_TYPE
+#define V8_ALLOW_UNUSED
#endif
// Tell the compiler a function is using a printf-style format string.
diff --git a/chromium/v8/src/base/flags.h b/chromium/v8/src/base/flags.h
index 96d99059ca5..2a36ca77e82 100644
--- a/chromium/v8/src/base/flags.h
+++ b/chromium/v8/src/base/flags.h
@@ -89,39 +89,39 @@ class Flags final {
mask_type mask_;
};
-#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) & rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs & lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator&(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) | rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs | lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator|(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) ^ rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs ^ lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator^(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE inline constexpr Type operator~(Type::flag_type val) { \
- return ~Type(val); \
+#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) & rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs & lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator&(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) | rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs | lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator|(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) ^ rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs ^ lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator^(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED inline constexpr Type operator~(Type::flag_type val) { \
+ return ~Type(val); \
}
} // namespace base
diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h
index fca0b2ebb2c..3a73afc1ce7 100644
--- a/chromium/v8/src/base/macros.h
+++ b/chromium/v8/src/base/macros.h
@@ -18,6 +18,11 @@
// This macro does nothing. That's all.
#define NOTHING(...)
+#define CONCAT_(a, b) a##b
+#define CONCAT(a, b) CONCAT_(a, b)
+// Creates a unique identifier. Useful for scopes to avoid shadowing names.
+#define UNIQUE_IDENTIFIER(base) CONCAT(base, __COUNTER__)
+
// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
// have to make sure that only standard-layout types and simple field
// designators are used.
@@ -162,6 +167,13 @@ V8_INLINE Dest bit_cast(Source const& source) {
#endif
#endif
+// Define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER macro.
+#if defined(__has_feature)
+#if __has_feature(undefined_behavior_sanitizer)
+#define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER 1
+#endif
+#endif
+
// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
#define DISABLE_CFI_PERF V8_CLANG_NO_SANITIZE("cfi")
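
A hypothetical use of the new UNIQUE_IDENTIFIER macro (SCOPED_LOCK is made up for illustration): because __COUNTER__ increments on every expansion, each guard gets a distinct local name, so two guards in the same scope do not shadow each other.

    #include <mutex>
    #define SCOPED_LOCK(mu) \
      std::lock_guard<std::mutex> UNIQUE_IDENTIFIER(scoped_lock_)(mu)
    // Successive expansions produce distinct names such as scoped_lock_12,
    // scoped_lock_13, ... depending on the current __COUNTER__ value.
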
diff --git a/chromium/v8/src/base/optional.h b/chromium/v8/src/base/optional.h
index 77e9bb896e3..31fe9a972c1 100644
--- a/chromium/v8/src/base/optional.h
+++ b/chromium/v8/src/base/optional.h
@@ -35,7 +35,7 @@ constexpr in_place_t in_place = {};
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
-// Forward declaration, which is refered by following helpers.
+// Forward declaration, which is referred by following helpers.
template <typename T>
class Optional;
diff --git a/chromium/v8/src/base/page-allocator.cc b/chromium/v8/src/base/page-allocator.cc
index 1438c883377..2956bf14755 100644
--- a/chromium/v8/src/base/page-allocator.cc
+++ b/chromium/v8/src/base/page-allocator.cc
@@ -151,5 +151,9 @@ bool PageAllocator::DiscardSystemPages(void* address, size_t size) {
return base::OS::DiscardSystemPages(address, size);
}
+bool PageAllocator::DecommitPages(void* address, size_t size) {
+ return base::OS::DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/chromium/v8/src/base/page-allocator.h b/chromium/v8/src/base/page-allocator.h
index a98f0847907..7374c678377 100644
--- a/chromium/v8/src/base/page-allocator.h
+++ b/chromium/v8/src/base/page-allocator.h
@@ -47,6 +47,8 @@ class V8_BASE_EXPORT PageAllocator
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
friend class v8::base::SharedMemory;
diff --git a/chromium/v8/src/base/platform/platform-fuchsia.cc b/chromium/v8/src/base/platform/platform-fuchsia.cc
index bd0000c4a1a..11dba08d79f 100644
--- a/chromium/v8/src/base/platform/platform-fuchsia.cc
+++ b/chromium/v8/src/base/platform/platform-fuchsia.cc
@@ -133,6 +133,14 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return status == ZX_OK;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ // We rely on DiscardSystemPages decommitting the pages immediately (via
+ // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
+ // should they be accessed again later on.
+ return SetPermissions(address, size, MemoryPermission::kNoAccess) &&
+ DiscardSystemPages(address, size);
+}
+
// static
bool OS::HasLazyCommits() {
// TODO(scottmg): Port, https://crbug.com/731217.
diff --git a/chromium/v8/src/base/platform/platform-posix.cc b/chromium/v8/src/base/platform/platform-posix.cc
index 179a17cc0f4..f05f22c9136 100644
--- a/chromium/v8/src/base/platform/platform-posix.cc
+++ b/chromium/v8/src/base/platform/platform-posix.cc
@@ -341,6 +341,10 @@ void* OS::GetRandomMmapAddr() {
// TODO(RISCV): We need more information from the kernel to correctly mask
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
raw_addr &= uint64_t{0xFFFFFF0000};
+#elif V8_TARGET_ARCH_LOONG64
+ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -491,6 +495,20 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
+ // "If a MAP_FIXED request is successful, then any previous mappings [...] for
+ // those whole pages containing any part of the address range [pa,pa+len)
+ // shall be removed, as if by an appropriate call to munmap(), before the new
+ // mapping is established." As a consequence, the memory will be
+ // zero-initialized on next access.
+ void* ptr = mmap(address, size, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ return ptr == address;
+}
+
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
@@ -530,6 +548,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
+#elif V8_HOST_ARCH_LOONG64
+ asm("break 0");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
@@ -566,25 +586,29 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
FileMode mode) {
const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
- if (FILE* file = fopen(name, fopen_mode)) {
- if (fseek(file, 0, SEEK_END) == 0) {
- long size = ftell(file); // NOLINT(runtime/int)
- if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
- if (size > 0) {
- int prot = PROT_READ;
- int flags = MAP_PRIVATE;
- if (mode == FileMode::kReadWrite) {
- prot |= PROT_WRITE;
- flags = MAP_SHARED;
- }
- void* const memory =
- mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
- if (memory != MAP_FAILED) {
- return new PosixMemoryMappedFile(file, memory, size);
+ struct stat statbuf;
+ // Make sure path exists and is not a directory.
+ if (stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) {
+ if (FILE* file = fopen(name, fopen_mode)) {
+ if (fseek(file, 0, SEEK_END) == 0) {
+ long size = ftell(file); // NOLINT(runtime/int)
+ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
+ if (size > 0) {
+ int prot = PROT_READ;
+ int flags = MAP_PRIVATE;
+ if (mode == FileMode::kReadWrite) {
+ prot |= PROT_WRITE;
+ flags = MAP_SHARED;
+ }
+ void* const memory =
+ mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
+ if (memory != MAP_FAILED) {
+ return new PosixMemoryMappedFile(file, memory, size);
+ }
}
}
+ fclose(file);
}
- fclose(file);
}
return nullptr;
}
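
A hedged sketch of the contract OS::DecommitPages establishes on POSIX (ptr and size are assumed to be a page-aligned, committed mapping created through the same OS layer):

    if (v8::base::OS::DecommitPages(ptr, size) &&
        v8::base::OS::SetPermissions(
            ptr, size, v8::base::OS::MemoryPermission::kReadWrite)) {
      // The MAP_FIXED remapping above discards the old backing, so the first
      // access after re-commit is guaranteed to read zeroes.
      unsigned char first_byte = static_cast<unsigned char*>(ptr)[0];
      (void)first_byte;  // expected to be 0
    }
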
diff --git a/chromium/v8/src/base/platform/platform-win32.cc b/chromium/v8/src/base/platform/platform-win32.cc
index 79c1aa06ce9..6b5c5df4963 100644
--- a/chromium/v8/src/base/platform/platform-win32.cc
+++ b/chromium/v8/src/base/platform/platform-win32.cc
@@ -935,6 +935,21 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// static
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
+ // "If a page is decommitted but not released, its state changes to reserved.
+ // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
+ // release it. Attempts to read from or write to a reserved page results in an
+ // access violation exception."
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
+ // for MEM_COMMIT: "The function also guarantees that when the caller later
+ // initially accesses the memory, the contents will be zero."
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
diff --git a/chromium/v8/src/base/platform/platform.h b/chromium/v8/src/base/platform/platform.h
index 6ad724db08e..2e7ad32974f 100644
--- a/chromium/v8/src/base/platform/platform.h
+++ b/chromium/v8/src/base/platform/platform.h
@@ -47,7 +47,7 @@
// And, intrin.h is a very expensive header that we want to avoid here, and
// the cheaper intrin0.h is not available for all build configurations. That is
// why we declare this intrinsic.
-unsigned long __readfsdword(unsigned long); // NOLINT(runtime/int)
+extern "C" unsigned long __readfsdword(unsigned long); // NOLINT(runtime/int)
#endif // V8_CC_MSVC && V8_HOST_ARCH_IA32
#endif // V8_NO_FAST_TLS
@@ -311,6 +311,8 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DiscardSystemPages(void* address,
size_t size);
+ V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
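
Taken together, the POSIX hunk above (mmap with PROT_NONE | MAP_FIXED), the Windows hunk (VirtualFree with MEM_DECOMMIT) and this platform.h declaration give the new OS::DecommitPages() one contract: the range must be commit-page aligned, the pages become inaccessible, and their contents read as zero once they are committed and touched again. A minimal caller-side sketch of that lifecycle (RecycleBuffer is illustrative and not part of the patch; it assumes the usual OS::SetPermissions() path for re-committing):

#include "src/base/logging.h"
#include "src/base/platform/platform.h"

// Illustrative sketch only: |buffer| is a commit-page-aligned range that was
// previously committed through the platform layer.
void RecycleBuffer(void* buffer, size_t size) {
  using v8::base::OS;
  // Return the physical pages to the OS; the address reservation stays put.
  CHECK(OS::DecommitPages(buffer, size));
  // Later, re-commit before reuse (DecommitPages left the range inaccessible)
  // and rely on the zero-on-next-access guarantee documented above.
  CHECK(OS::SetPermissions(buffer, size, OS::MemoryPermission::kReadWrite));
  CHECK_EQ(0, static_cast<unsigned char*>(buffer)[0]);
}
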
diff --git a/chromium/v8/src/base/region-allocator.cc b/chromium/v8/src/base/region-allocator.cc
index 9224dc99dc3..53932d2864f 100644
--- a/chromium/v8/src/base/region-allocator.cc
+++ b/chromium/v8/src/base/region-allocator.cc
@@ -200,6 +200,35 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
return true;
}
+RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
+ size_t size, size_t alignment) {
+ DCHECK(IsAligned(size, page_size_));
+ DCHECK(IsAligned(alignment, page_size_));
+ DCHECK_GE(alignment, page_size_);
+
+ const size_t padded_size = size + alignment - page_size_;
+ Region* region = FreeListFindRegion(padded_size);
+ if (region == nullptr) return kAllocationFailure;
+
+ if (!IsAligned(region->begin(), alignment)) {
+ size_t start = RoundUp(region->begin(), alignment);
+ region = Split(region, start - region->begin());
+ DCHECK_EQ(region->begin(), start);
+ DCHECK(IsAligned(region->begin(), alignment));
+ }
+
+ if (region->size() != size) {
+ Split(region, size);
+ }
+ DCHECK(IsAligned(region->begin(), alignment));
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_state(RegionState::kAllocated);
+ return region->begin();
+}
+
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
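
AllocateAlignedRegion() above over-allocates by alignment - page_size so that, whatever page-aligned free region the free list hands back, an alignment-aligned start with |size| bytes behind it is guaranteed to fit; the leading and trailing slack is split off and stays free. A standalone check of that invariant (independent of V8, just the arithmetic):

#include <cassert>
#include <cstddef>
#include <cstdint>

// For any page-aligned region start, rounding up to |alignment| inside a
// block of size + alignment - page_size still leaves room for |size| bytes.
int main() {
  const size_t page_size = 4096;
  const size_t size = 8 * page_size;
  const size_t alignment = 16 * page_size;
  const size_t padded_size = size + alignment - page_size;
  for (uintptr_t begin = 0; begin <= 4 * alignment; begin += page_size) {
    const uintptr_t aligned_start = (begin + alignment - 1) & ~(alignment - 1);
    assert(aligned_start + size <= begin + padded_size);
  }
  return 0;
}
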
diff --git a/chromium/v8/src/base/region-allocator.h b/chromium/v8/src/base/region-allocator.h
index adc4bd10b67..f80524870f4 100644
--- a/chromium/v8/src/base/region-allocator.h
+++ b/chromium/v8/src/base/region-allocator.h
@@ -61,6 +61,11 @@ class V8_BASE_EXPORT RegionAllocator final {
bool AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state = RegionState::kAllocated);
+ // Allocates a region of |size| aligned to |alignment|. The size and alignment
+ // must be multiples of |page_size|. Returns the address of the region on
+ // success or kAllocationFailure.
+ Address AllocateAlignedRegion(size_t size, size_t alignment);
+
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
diff --git a/chromium/v8/src/base/sanitizer/asan.h b/chromium/v8/src/base/sanitizer/asan.h
index 82f03aa2581..6466fc6163b 100644
--- a/chromium/v8/src/base/sanitizer/asan.h
+++ b/chromium/v8/src/base/sanitizer/asan.h
@@ -24,8 +24,9 @@
// Check that all bytes in a memory region are poisoned. This is different from
// `__asan_region_is_poisoned()` which only requires a single byte in the region
-// to be poisoned.
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+// to be poisoned. Note that the macro only works if both start and size are
+// multiples of ASAN's shadow memory granularity.
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
do { \
for (size_t i = 0; i < size; i++) { \
CHECK(__asan_address_is_poisoned(reinterpret_cast<const char*>(start) + \
@@ -47,7 +48,7 @@
#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
#endif // !V8_USE_ADDRESS_SANITIZER
diff --git a/chromium/v8/src/base/sanitizer/tsan.h b/chromium/v8/src/base/sanitizer/tsan.h
new file mode 100644
index 00000000000..854c82eb225
--- /dev/null
+++ b/chromium/v8/src/base/sanitizer/tsan.h
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ThreadSanitizer support.
+
+#ifndef V8_BASE_SANITIZER_TSAN_H_
+#define V8_BASE_SANITIZER_TSAN_H_
+
+#if defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN __attribute__((no_sanitize_thread))
+
+#else // !defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN
+
+#endif // !defined(THREAD_SANITIZER)
+
+#endif // V8_BASE_SANITIZER_TSAN_H_
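
The new tsan.h follows the pattern of the existing sanitizer headers: under ThreadSanitizer, DISABLE_TSAN expands to __attribute__((no_sanitize_thread)); otherwise it expands to nothing. A possible use (the function below is a made-up example, not code from the patch):

#include "src/base/sanitizer/tsan.h"

// Hypothetical example: suppress TSan reports for a read that is knowingly
// racy (e.g. a statistics counter sampled without synchronization).
DISABLE_TSAN int ReadCounterRacily(const volatile int* counter) {
  return *counter;
}
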
diff --git a/chromium/v8/src/base/vlq.h b/chromium/v8/src/base/vlq.h
index 96ee42cf6e8..25dba27bfb2 100644
--- a/chromium/v8/src/base/vlq.h
+++ b/chromium/v8/src/base/vlq.h
@@ -91,7 +91,7 @@ VLQDecodeUnsigned(GetNextFunction&& get_next) {
}
uint32_t bits = cur_byte & kDataMask;
for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) {
- byte cur_byte = get_next();
+ cur_byte = get_next();
bits |= (cur_byte & kDataMask) << shift;
if (cur_byte <= kDataMask) break;
}
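
The one-line vlq.h change fixes variable shadowing: the inner `byte cur_byte` declaration hid the outer variable, so the loop's continuation test `cur_byte <= kDataMask` kept looking at the first byte instead of the one just read. A standalone sketch of the corrected decode loop (the kDataMask and kContinueShift values are assumptions based on the usual 7-bits-per-byte VLQ layout):

#include <cstdint>

// Minimal sketch of the corrected unsigned-VLQ decode.
template <typename GetNextFunction>
uint32_t DecodeUnsignedVLQ(GetNextFunction&& get_next) {
  constexpr uint32_t kDataMask = 0x7f;
  constexpr int kContinueShift = 7;
  uint32_t cur_byte = get_next();
  if (cur_byte <= kDataMask) return cur_byte;  // single-byte fast path
  uint32_t bits = cur_byte & kDataMask;
  for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) {
    cur_byte = get_next();  // the fix: assign, do not redeclare (no shadowing)
    bits |= (cur_byte & kDataMask) << shift;
    if (cur_byte <= kDataMask) break;  // continuation bit clear: last byte
  }
  return bits;
}
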
diff --git a/chromium/v8/src/base/win32-headers.h b/chromium/v8/src/base/win32-headers.h
index e4e845d86d0..95aedd8c95e 100644
--- a/chromium/v8/src/base/win32-headers.h
+++ b/chromium/v8/src/base/win32-headers.h
@@ -41,12 +41,6 @@
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif // __MINGW32__
#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
#include <errno.h> // For STRUNCATE
#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
diff --git a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index 040761091ab..db3c05ce18a 100644
--- a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -501,14 +501,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->add(params_size, params_size,
- Operand(1)); // Include the receiver.
- __ masm()->Drop(params_size);
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index cda21083270..7824f92c2aa 100644
--- a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -583,13 +583,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->Add(params_size, params_size, 1); // Include the receiver.
- __ masm()->DropArguments(params_size);
+ __ masm()->DropArguments(params_size,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/baseline-assembler-inl.h b/chromium/v8/src/baseline/baseline-assembler-inl.h
index 83c102176f8..583db7e6798 100644
--- a/chromium/v8/src/baseline/baseline-assembler-inl.h
+++ b/chromium/v8/src/baseline/baseline-assembler-inl.h
@@ -34,6 +34,8 @@
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -135,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister);
}
+EnsureAccumulatorPreservedScope::EnsureAccumulatorPreservedScope(
+ BaselineAssembler* assembler)
+ : assembler_(assembler)
+#ifdef V8_CODE_COMMENTS
+ ,
+ comment_(assembler->masm(), "EnsureAccumulatorPreservedScope")
+#endif
+{
+ assembler_->Push(kInterpreterAccumulatorRegister);
+}
+
+EnsureAccumulatorPreservedScope::~EnsureAccumulatorPreservedScope() {
+ BaselineAssembler::ScratchRegisterScope scratch(assembler_);
+ Register reg = scratch.AcquireScratch();
+ assembler_->Pop(reg);
+ AssertEqualToAccumulator(reg);
+}
+
#undef __
} // namespace baseline
diff --git a/chromium/v8/src/baseline/baseline-assembler.h b/chromium/v8/src/baseline/baseline-assembler.h
index e1063ff2b26..b8c876a8d37 100644
--- a/chromium/v8/src/baseline/baseline-assembler.h
+++ b/chromium/v8/src/baseline/baseline-assembler.h
@@ -202,6 +202,21 @@ class SaveAccumulatorScope final {
BaselineAssembler* assembler_;
};
+class EnsureAccumulatorPreservedScope final {
+ public:
+ inline explicit EnsureAccumulatorPreservedScope(BaselineAssembler* assembler);
+
+ inline ~EnsureAccumulatorPreservedScope();
+
+ private:
+ inline void AssertEqualToAccumulator(Register reg);
+
+ BaselineAssembler* assembler_;
+#ifdef V8_CODE_COMMENTS
+ Assembler::CodeComment comment_;
+#endif
+};
+
} // namespace baseline
} // namespace internal
} // namespace v8
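
EnsureAccumulatorPreservedScope (declared here, defined in baseline-assembler-inl.h above) is a debug-only RAII guard: the constructor pushes the interpreter accumulator, and the destructor pops the saved value into a scratch register and asserts it still equals kInterpreterAccumulatorRegister, using the per-architecture AssertEqualToAccumulator() added in the other hunks. A usage sketch mirroring the VisitSingleBytecode() change later in this patch (the visitor call is a placeholder, not a real function):

// Illustrative only: wrap work that must not clobber the accumulator.
void VisitGuarded(BaselineAssembler* basm) {
#ifdef DEBUG
  EnsureAccumulatorPreservedScope guard(basm);  // pushes the accumulator
#endif
  VisitSomeNonWritingBytecode();  // placeholder for the guarded work
  // In debug builds the destructor pops the saved value and asserts that the
  // accumulator register is unchanged.
}
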
diff --git a/chromium/v8/src/baseline/baseline-batch-compiler.cc b/chromium/v8/src/baseline/baseline-batch-compiler.cc
index 6a25df72648..249702bd623 100644
--- a/chromium/v8/src/baseline/baseline-batch-compiler.cc
+++ b/chromium/v8/src/baseline/baseline-batch-compiler.cc
@@ -6,13 +6,13 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/fixed-array-inl.h"
@@ -40,7 +40,7 @@ bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
// Early return if the function is compiled with baseline already or it is not
// suitable for baseline compilation.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
if (!CanCompileWithBaseline(isolate_, *shared)) return false;
// Immediately compile the function if batch compilation is disabled.
diff --git a/chromium/v8/src/baseline/baseline-compiler.cc b/chromium/v8/src/baseline/baseline-compiler.cc
index f30812c85a2..63d684e733e 100644
--- a/chromium/v8/src/baseline/baseline-compiler.cc
+++ b/chromium/v8/src/baseline/baseline-compiler.cc
@@ -48,6 +48,8 @@
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -321,9 +323,16 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
// Allocate the bytecode offset table.
Handle<ByteArray> bytecode_offset_table =
bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
- return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
- .set_bytecode_offset_table(bytecode_offset_table)
- .TryBuild();
+
+ Factory::CodeBuilder code_builder(isolate, desc, CodeKind::BASELINE);
+ code_builder.set_bytecode_offset_table(bytecode_offset_table);
+ if (shared_function_info_->HasInterpreterData()) {
+ code_builder.set_interpreter_data(
+ handle(shared_function_info_->interpreter_data(), isolate));
+ } else {
+ code_builder.set_interpreter_data(bytecode_);
+ }
+ return code_builder.TryBuild();
}
int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
@@ -488,13 +497,31 @@ void BaselineCompiler::VisitSingleBytecode() {
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
- switch (iterator().current_bytecode()) {
+ {
+ interpreter::Bytecode bytecode = iterator().current_bytecode();
+
+#ifdef DEBUG
+ base::Optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
+ // We should make sure to preserve the accumulator whenever the bytecode
+ // isn't registered as writing to it. We can't do this for jumps or switches
+ // though, since the control flow would not match the control flow of this
+ // scope.
+ if (FLAG_debug_code &&
+ !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
+ !interpreter::Bytecodes::IsJump(bytecode) &&
+ !interpreter::Bytecodes::IsSwitch(bytecode)) {
+ accumulator_preserved_scope.emplace(&basm_);
+ }
+#endif // DEBUG
+
+ switch (bytecode) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
break;
- BYTECODE_LIST(BYTECODE_CASE)
+ BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
+ }
}
#ifdef V8_TRACE_UNOPTIMIZED
@@ -1173,53 +1200,57 @@ void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
args);
}
void BaselineCompiler::VisitCallProperty0() {
- BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(2), 0,
- RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(2), JSParameterCount(0), RegisterOperand(1));
}
void BaselineCompiler::VisitCallProperty1() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(3), 1, RegisterOperand(1), RegisterOperand(2));
+ Index(3), JSParameterCount(1), RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallProperty2() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+ Index(4), JSParameterCount(2), RegisterOperand(1), RegisterOperand(2),
+ RegisterOperand(3));
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
Index(3), arg_count, RootIndex::kUndefinedValue, args);
}
void BaselineCompiler::VisitCallUndefinedReceiver0() {
- BuildCall<ConvertReceiverMode::kNullOrUndefined>(Index(1), 0,
- RootIndex::kUndefinedValue);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(1), JSParameterCount(0), RootIndex::kUndefinedValue);
}
void BaselineCompiler::VisitCallUndefinedReceiver1() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1));
+ Index(2), JSParameterCount(1), RootIndex::kUndefinedValue,
+ RegisterOperand(1));
}
void BaselineCompiler::VisitCallUndefinedReceiver2() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1),
- RegisterOperand(2));
+ Index(3), JSParameterCount(2), RootIndex::kUndefinedValue,
+ RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallWithSpread() {
@@ -1229,7 +1260,8 @@ void BaselineCompiler::VisitCallWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
CallBuiltin<Builtin::kCallWithSpread_Baseline>(
RegisterOperand(0), // kFunction
@@ -1253,7 +1285,7 @@ void BaselineCompiler::VisitCallRuntimeForPair() {
void BaselineCompiler::VisitCallJSRuntime() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
// Load context for LoadNativeContextSlot.
__ LoadContext(kContextRegister);
@@ -1376,7 +1408,7 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
void BaselineCompiler::VisitConstruct() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
CallBuiltin<Builtin::kConstruct_Baseline>(
RegisterOperand(0), // kFunction
kInterpreterAccumulatorRegister, // kNewTarget
@@ -1393,7 +1425,7 @@ void BaselineCompiler::VisitConstructWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
using Descriptor =
CallInterfaceDescriptorFor<Builtin::kConstructWithSpread_Baseline>::type;
@@ -2079,13 +2111,15 @@ void BaselineCompiler::VisitReturn() {
iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
- int parameter_count_without_receiver =
- parameter_count - 1; // Exclude the receiver to simplify the
- // computation. We'll account for it at the end.
- TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
- parameter_count_without_receiver, -profiling_weight);
+ if (kJSArgcIncludesReceiver) {
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
+ -profiling_weight);
+
+ } else {
+ int parameter_count_without_receiver = parameter_count - 1;
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
+ parameter_count_without_receiver, -profiling_weight);
+ }
}
void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
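
The call-visitor changes above all route receiver counting through two helpers: kJSArgcIncludesReceiver says whether the JavaScript argument count already includes the receiver slot, and JSParameterCount() turns a receiver-less count into whatever convention is active. A simplified restatement of what they amount to (the real definitions are build-flag dependent and live elsewhere in V8; the constant value below is an assumption for illustration):

// Simplified restatement, not the patched source.
constexpr bool kJSArgcIncludesReceiver = true;  // assumed for illustration

constexpr int JSParameterCount(int param_count_without_receiver) {
  // Add one slot for the receiver when the active convention counts it.
  return param_count_without_receiver + (kJSArgcIncludesReceiver ? 1 : 0);
}

static_assert(JSParameterCount(2) == 2 + (kJSArgcIncludesReceiver ? 1 : 0),
              "VisitCallProperty2 pushes two arguments plus the receiver");
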
diff --git a/chromium/v8/src/baseline/baseline-compiler.h b/chromium/v8/src/baseline/baseline-compiler.h
index d8cd9ac5c68..341e7c0822f 100644
--- a/chromium/v8/src/baseline/baseline-compiler.h
+++ b/chromium/v8/src/baseline/baseline-compiler.h
@@ -162,6 +162,7 @@ class BaselineCompiler {
LocalIsolate* local_isolate_;
RuntimeCallStats* stats_;
Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<HeapObject> interpreter_data_;
Handle<BytecodeArray> bytecode_;
MacroAssembler masm_;
BaselineAssembler basm_;
diff --git a/chromium/v8/src/baseline/baseline.cc b/chromium/v8/src/baseline/baseline.cc
index cec0805aece..764d2db645a 100644
--- a/chromium/v8/src/baseline/baseline.cc
+++ b/chromium/v8/src/baseline/baseline.cc
@@ -43,6 +43,13 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
// Functions with breakpoints have to stay interpreted.
if (shared.HasBreakInfo()) return false;
+ // Functions with instrumented bytecode can't be baseline compiled since the
+ // baseline code's bytecode array pointer is immutable.
+ if (shared.HasDebugInfo() &&
+ shared.GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ return false;
+ }
+
// Do not baseline compile if function doesn't pass sparkplug_filter.
if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
diff --git a/chromium/v8/src/baseline/bytecode-offset-iterator.cc b/chromium/v8/src/baseline/bytecode-offset-iterator.cc
index bbedac8ef30..d2504b62e9b 100644
--- a/chromium/v8/src/baseline/bytecode-offset-iterator.cc
+++ b/chromium/v8/src/baseline/bytecode-offset-iterator.cc
@@ -36,7 +36,7 @@ BytecodeOffsetIterator::BytecodeOffsetIterator(ByteArray mapping_table,
bytecode_iterator_(Handle<BytecodeArray>(
reinterpret_cast<Address*>(&bytecode_handle_storage_))),
local_heap_(nullptr) {
- no_gc.emplace();
+ no_gc_.emplace();
Initialize();
}
diff --git a/chromium/v8/src/baseline/bytecode-offset-iterator.h b/chromium/v8/src/baseline/bytecode-offset-iterator.h
index 6e78fba0614..9581a2a1f49 100644
--- a/chromium/v8/src/baseline/bytecode-offset-iterator.h
+++ b/chromium/v8/src/baseline/bytecode-offset-iterator.h
@@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
BytecodeArray bytecode_handle_storage_;
interpreter::BytecodeArrayIterator bytecode_iterator_;
LocalHeap* local_heap_;
- base::Optional<DisallowGarbageCollection> no_gc;
+ base::Optional<DisallowGarbageCollection> no_gc_;
};
} // namespace baseline
diff --git a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index e3f991886db..e280bee3da8 100644
--- a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -457,16 +457,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/chromium/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
new file mode 100644
index 00000000000..059d932ef9a
--- /dev/null
+++ b/chromium/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -0,0 +1,503 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, for the first one add a
+ // couple of extra registers.
+ wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.base() == target || op.index() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP.
+}
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ Branch(target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
+}
+
+void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ And(scratch, value, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
+}
+
+void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
+ InstanceType instance_type,
+ Register map, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
+ InstanceType instance_type,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
+ __ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(smi));
+ __ SmiUntag(scratch);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
+ Label* target, Label::Distance) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
+ MemOperand operand, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
+ Register value, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+}
+void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+}
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ St_d(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
+ }
+};
+
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ Ld_b(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(value));
+ __ St_d(scratch, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ ASM_CODE_COMMENT(masm_);
+ __ St_d(value, FieldMemOperand(target, offset));
+ ScratchRegisterScope temps(this);
+ __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ St_d(value, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ int32_t weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label) {
+ DCHECK_LT(weight, 0);
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+ }
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ Register weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label)
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ __ Add_d(lhs, lhs, Operand(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ ASM_CODE_COMMENT(masm_);
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub_d(reg, reg, Operand(case_value_base));
+ }
+
+ ScratchRegisterScope scope(this);
+ Register scratch = scope.AcquireScratch();
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(num_labels));
+ int entry_size_log2 = 2;
+ __ pcaddi(scratch, 3);
+ __ Alsl_d(scratch, reg, scratch, entry_size_log2);
+ __ Jump(scratch);
+ {
+ TurboAssembler::BlockTrampolinePoolScope(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ __ bind(&fallthrough);
+ }
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ ASM_CODE_COMMENT(masm);
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
+
+ Label skip_interrupt_label;
+ __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
+ __ masm()->SmiUntag(params_size);
+ __ Bind(&skip_interrupt_label);
+ }
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add_d(params_size, params_size, 1); // Include the receiver.
+ __ masm()->Alsl_d(sp, params_size, sp, kPointerSizeLog2);
+ __ masm()->Ret();
+}
+
+#undef __
+
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
diff --git a/chromium/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h b/chromium/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
new file mode 100644
index 00000000000..9a68c7ebca5
--- /dev/null
+++ b/chromium/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
+ CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ ASM_CODE_COMMENT(&masm_);
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ } else {
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ }
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->Add_d(t0, sp,
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
diff --git a/chromium/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/chromium/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 31bc96861b9..989d5c4ae5c 100644
--- a/chromium/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/chromium/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -506,6 +506,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/chromium/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index d8220fa798f..561e45249ed 100644
--- a/chromium/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/chromium/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -504,6 +504,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 01f5a5802bb..7bf6bd2f4ec 100644
--- a/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- __ CallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(masm_,
- __ CommentForOffHeapTrampoline("call", builtin));
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Call(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- // Generate pc-relative jump.
- __ TailCallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(
- masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
- // t6 be used for function call in RISCV64
- // For example 'jalr t6' or 'jal t6'
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Jump(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
- ScratchRegisterScope temps(this);
- Register temp = temps.AcquireScratch();
+ // TODO: handle pointer compression.
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(temp, lhs, rhs);
- } else {
- __ Sub64(temp, lhs, rhs);
- }
- __ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
+ // TODO: handle pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, value, tmp1);
- } else {
- __ Sub64(tmp2, value, tmp1);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
+ // TODO: handle pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, tmp1, value);
- } else {
- __ Sub64(tmp2, tmp1, value);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -268,136 +246,50 @@ inline Register ToRegister(BaselineAssembler* basm,
}
template <typename... Args>
-struct CountPushHelper;
-template <>
-struct CountPushHelper<> {
- static int Count() { return 0; }
-};
-template <typename Arg, typename... Args>
-struct CountPushHelper<Arg, Args...> {
- static int Count(Arg arg, Args... args) {
- return 1 + CountPushHelper<Args...>::Count(args...);
- }
-};
-template <typename... Args>
-struct CountPushHelper<interpreter::RegisterList, Args...> {
- static int Count(interpreter::RegisterList list, Args... args) {
- return list.register_count() + CountPushHelper<Args...>::Count(args...);
- }
-};
-
-template <typename... Args>
struct PushAllHelper;
-template <typename... Args>
-void PushAll(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::Push(basm, args...);
-}
-template <typename... Args>
-void PushAllReverse(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::PushReverse(basm, args...);
-}
-
template <>
struct PushAllHelper<> {
- static void Push(BaselineAssembler* basm) {}
- static void PushReverse(BaselineAssembler* basm) {}
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
};
-
-inline void PushSingle(MacroAssembler* masm, RootIndex source) {
- masm->PushRoot(source);
-}
-inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
-
-inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
-inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
- masm->Push(object);
-}
-inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
- masm->li(kScratchReg, (int64_t)(immediate));
- PushSingle(masm, kScratchReg);
-}
-
-inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
- masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
- masm->Ld(kScratchReg, operand);
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
- return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
-}
-
template <typename Arg>
struct PushAllHelper<Arg> {
- static void Push(BaselineAssembler* basm, Arg arg) {
- PushSingle(basm->masm(), arg);
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
}
- static void PushReverse(BaselineAssembler* basm, Arg arg) {
- // Push the padding register to round up the amount of values pushed.
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
-template <typename Arg1, typename Arg2, typename... Args>
-struct PushAllHelper<Arg1, Arg2, Args...> {
- static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg1),
- ToRegister(basm, &scope, arg2));
- }
- PushAll(basm, args...);
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
- static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- PushAllReverse(basm, args...);
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg2),
- ToRegister(basm, &scope, arg1));
- }
- }
-};
-// Currently RegisterLists are always be the last argument, so we don't
-// specialize for the case where they're not. We do still specialise for the
-// aligned and unaligned cases.
-template <typename Arg>
-struct PushAllHelper<Arg, interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 1);
- PushAll(basm, arg, list[0], list.PopLeft());
- }
- static void PushReverse(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- if (list.register_count() == 0) {
- PushAllReverse(basm, arg);
- } else {
- PushAllReverse(basm, arg, list[0], list.PopLeft());
- }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 0);
- for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
- PushAll(basm, list[reg_index], list[reg_index + 1]);
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
- static void PushReverse(BaselineAssembler* basm,
- interpreter::RegisterList list) {
- int reg_index = list.register_count() - 1;
- if (reg_index % 2 == 0) {
- // Push the padding register to round up the amount of values pushed.
- PushAllReverse(basm, list[reg_index]);
- reg_index--;
- }
- for (; reg_index >= 1; reg_index -= 2) {
- PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
};
@@ -414,10 +306,9 @@ struct PopAllHelper<Register> {
}
};
template <typename... T>
-struct PopAllHelper<Register, Register, T...> {
- static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
- T... tail) {
- basm->masm()->Pop(reg1, reg2);
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
@@ -426,20 +317,12 @@ struct PopAllHelper<Register, Register, T...> {
template <typename... T>
int BaselineAssembler::Push(T... vals) {
- // We have to count the pushes first, to decide whether to add padding before
- // the first push.
- int push_count = detail::CountPushHelper<T...>::Count(vals...);
- if (push_count % 2 == 0) {
- detail::PushAll(this, vals...);
- } else {
- detail::PushAll(this, vals...);
- }
- return push_count;
+ return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
- detail::PushAllReverse(this, vals...);
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
- __ Ld(output, FieldMemOperand(source, offset));
+ __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label)
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
@@ -546,30 +429,28 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-riscv64.cc
ScratchRegisterScope scope(this);
- Register temp = scope.AcquireScratch();
Label table;
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(int64_t(num_labels)));
int64_t imm64;
imm64 = __ branch_long_offset(&table);
- DCHECK(is_int32(imm64));
+ CHECK(is_int32(imm64 + 0x800));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- __ auipc(temp, Hi20); // Read PC + Hi20 into t6
- __ lui(temp, Lo12); // jump PC + Hi20 + Lo12
+ __ auipc(t6, Hi20); // Read PC + Hi20 into t6
+ __ addi(t6, t6, Lo12); // jump PC + Hi20 + Lo12
- int entry_size_log2 = 2;
- Register temp2 = scope.AcquireScratch();
- __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
- __ Jump(temp);
+ int entry_size_log2 = 3;
+ __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
+ __ Jump(t6);
{
TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
+ __ BranchLong(labels[i]);
}
- DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+ DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
__ bind(&fallthrough);
}
}
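
The Hi20/Lo12 split used for the auipc/addi pair above follows the usual RISC-V convention: auipc materializes pc + (Hi20 << 12) and addi adds the sign-extended low 12 bits, so Hi20 is computed from imm + 0x800 to compensate for that sign extension, which is also why the CHECK now tests is_int32(imm64 + 0x800). A small self-check of the decomposition on plain integers, as an illustration:

#include <cassert>
#include <cstdint>

// Recombining the split offset must reproduce the original immediate:
// (Hi20 << 12) + sign_extend(Lo12) == imm, provided imm + 0x800 stays in int32 range.
int64_t SplitAndRecombine(int32_t imm) {
  int32_t hi20 = (imm + 0x800) >> 12;  // arithmetic shift assumed, as in the V8 code
  int32_t lo12 = imm & 0xfff;
  if (lo12 >= 0x800) lo12 -= 0x1000;   // sign-extend the low 12 bits
  return (static_cast<int64_t>(hi20) << 12) + lo12;
}

int main() {
  for (int32_t imm : {0, 1, 0x7ff, 0x800, 0x801, -1, -0x800, 123456, -123456}) {
    assert(SplitAndRecombine(imm) == imm);
  }
}
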
@@ -598,7 +479,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
- __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
__ Bind(&skip_interrupt_label);
@@ -630,6 +511,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index fc73105b8e9..1fbdaa0761e 100644
--- a/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
- // BaselineOutOfLinePrologue already pushed one undefined.
- register_count -= 1;
if (has_new_target) {
- if (new_target_index == 0) {
- // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
- // pushed.
- __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
- } else {
- DCHECK_LE(new_target_index, register_count);
- int index = 1;
- for (; index + 2 <= new_target_index; index += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- if (index == new_target_index) {
- __ masm()->Push(kJavaScriptCallNewTargetRegister,
- kInterpreterAccumulatorRegister);
- } else {
- DCHECK_EQ(index, new_target_index - 1);
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kJavaScriptCallNewTargetRegister);
- }
- // We pushed "index" registers, minus the one the prologue pushed, plus
- // the two registers that included new_target.
- register_count -= (index - 1 + 2);
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
- for (int i = 0; i < register_count; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
- BaselineAssembler::ScratchRegisterScope temps(&basm_);
- Register scratch = temps.AcquireScratch();
-
- // Extract the first few registers to round to the unroll size.
- int first_registers = register_count % kLoopUnrollSize;
- for (int i = 0; i < first_registers; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- __ Move(scratch, register_count / kLoopUnrollSize);
- // We enter the loop unconditionally, so make sure we need to loop at least
- // once.
- DCHECK_GT(register_count / kLoopUnrollSize, 0);
- Label loop;
- __ Bind(&loop);
- for (int i = 0; i < kLoopUnrollSize; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
- __ masm()->Branch(&loop, gt, scratch, Operand(1));
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add64(kScratchReg, sp,
- RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
- bytecode_->frame_size(),
- 2 * kSystemPointerSize));
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
diff --git a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index f18ac84eaee..aa9564dceaa 100644
--- a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -468,16 +468,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp_tagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/bigint/bigint-internal.h b/chromium/v8/src/bigint/bigint-internal.h
index 4c214153bf3..e1e8cf77a07 100644
--- a/chromium/v8/src/bigint/bigint-internal.h
+++ b/chromium/v8/src/bigint/bigint-internal.h
@@ -22,6 +22,7 @@ constexpr int kNewtonInversionThreshold = 50;
// kBarrettThreshold is defined in bigint.h.
constexpr int kToStringFastThreshold = 43;
+constexpr int kFromStringLargeThreshold = 300;
class ProcessorImpl : public Processor {
public:
@@ -69,6 +70,8 @@ class ProcessorImpl : public Processor {
void FromString(RWDigits Z, FromStringAccumulator* accumulator);
void FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringLarge(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringBasePowerOfTwo(RWDigits Z, FromStringAccumulator* accumulator);
bool should_terminate() { return status_ == Status::kInterrupted; }
diff --git a/chromium/v8/src/bigint/bigint.h b/chromium/v8/src/bigint/bigint.h
index 218bf4616cb..28df2936ac2 100644
--- a/chromium/v8/src/bigint/bigint.h
+++ b/chromium/v8/src/bigint/bigint.h
@@ -227,12 +227,40 @@ void Add(RWDigits Z, Digits X, Digits Y);
// Addition of signed integers. Returns true if the result is negative.
bool AddSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
bool y_negative);
+// Z := X + 1
+void AddOne(RWDigits Z, Digits X);
// Z := X - Y. Requires X >= Y.
void Subtract(RWDigits Z, Digits X, Digits Y);
// Subtraction of signed integers. Returns true if the result is negative.
bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
bool y_negative);
+// Z := X - 1
+void SubtractOne(RWDigits Z, Digits X);
+
+// The bitwise operations assume that negative BigInts are represented as
+// sign+magnitude. Their behavior depends on the sign of the inputs: negative
+// inputs perform an implicit conversion to two's complement representation.
+// Z := X & Y
+void BitwiseAnd_PosPos(RWDigits Z, Digits X, Digits Y);
+// Call this for a BigInt x = (magnitude=X, negative=true).
+void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y);
+// Positive X, negative Y. Callers must swap arguments as needed.
+void BitwiseAnd_PosNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_PosPos(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_NegNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y);
+
+// Z := (least significant n bits of X, interpreted as a signed n-bit integer).
+// Returns true if the result is negative; Z will hold the absolute value.
+bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n);
+// Z := (least significant n bits of X).
+void AsUintN_Pos(RWDigits Z, Digits X, int n);
+// Same, but X is the absolute value of a negative BigInt.
+void AsUintN_Neg(RWDigits Z, Digits X, int n);
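
These entry points operate on magnitudes only; negative operands are folded in via two's-complement identities such as (-x) & (-y) == -(((x-1) | (y-1)) + 1), which bitwise.cc implements digit-wise with borrows. A quick check of that identity on plain machine integers (illustration only; the real code works on digit vectors):

#include <cassert>
#include <cstdint>

int main() {
  // (-x) & (-y) == -(((x - 1) | (y - 1)) + 1) for positive x, y.
  // Example: x = 2, y = 3 gives -4, the one-digit growth case mentioned
  // in BitwiseAnd_NegNeg_ResultLength below.
  for (int64_t x = 1; x <= 64; ++x) {
    for (int64_t y = 1; y <= 64; ++y) {
      int64_t via_twos_complement = (-x) & (-y);
      int64_t via_sign_magnitude = -(((x - 1) | (y - 1)) + 1);
      assert(via_twos_complement == via_sign_magnitude);
    }
  }
}
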
enum class Status { kOk, kInterrupted };
@@ -262,6 +290,8 @@ class Processor {
// upon return will be set to the actual length of the result string.
Status ToString(char* out, int* out_length, Digits X, int radix, bool sign);
+ // Z := the contents of {accumulator}.
+  // Assume that this leaves {accumulator} in an unusable state.
Status FromString(RWDigits Z, FromStringAccumulator* accumulator);
};
@@ -301,6 +331,36 @@ int ToStringResultLength(Digits X, int radix, bool sign);
// In DEBUG builds, the result of {ToString} will be initialized to this value.
constexpr char kStringZapValue = '?';
+inline int BitwiseAnd_PosPos_ResultLength(int x_length, int y_length) {
+ return std::min(x_length, y_length);
+}
+inline int BitwiseAnd_NegNeg_ResultLength(int x_length, int y_length) {
+ // Result length growth example: -2 & -3 = -4 (2-bit inputs, 3-bit result).
+ return std::max(x_length, y_length) + 1;
+}
+inline int BitwiseAnd_PosNeg_ResultLength(int x_length) { return x_length; }
+inline int BitwiseOrResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_PosPos_ResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_NegNeg_ResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_PosNeg_ResultLength(int x_length, int y_length) {
+ // Result length growth example: 3 ^ -1 == -4 (2-bit inputs, 3-bit result).
+ return std::max(x_length, y_length) + 1;
+}
+
+// Returns -1 if this "asIntN" operation would be a no-op.
+int AsIntNResultLength(Digits X, bool x_negative, int n);
+// Returns -1 if this "asUintN" operation would be a no-op.
+int AsUintN_Pos_ResultLength(Digits X, int n);
+inline int AsUintN_Neg_ResultLength(int n) {
+ return ((n - 1) / kDigitBits) + 1;
+}
+
// Support for parsing BigInts from Strings, using an Accumulator object
// for intermediate state.
@@ -336,7 +396,7 @@ class FromStringAccumulator {
// So for sufficiently large N, setting max_digits=N here will not actually
// allow parsing BigInts with N digits. We can fix that if/when anyone cares.
explicit FromStringAccumulator(int max_digits)
- : max_digits_(std::max(max_digits - kStackParts, kStackParts)) {}
+ : max_digits_(std::max(max_digits, kStackParts)) {}
// Step 2: Call this method to read all characters.
// {Char} should be a character type, such as uint8_t or uint16_t.
@@ -348,7 +408,7 @@ class FromStringAccumulator {
digit_t radix);
// Step 3: Check if a result is available, and determine its required
- // allocation size.
+ // allocation size (guaranteed to be <= max_digits passed to the constructor).
Result result() { return result_; }
int ResultLength() {
return std::max(stack_parts_used_, static_cast<int>(heap_parts_.size()));
@@ -360,8 +420,12 @@ class FromStringAccumulator {
private:
friend class ProcessorImpl;
- ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part,
- bool is_last = false);
+ template <class Char>
+ ALWAYS_INLINE const Char* ParsePowerTwo(const Char* start, const Char* end,
+ digit_t radix);
+
+ ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, bool is_last);
+ ALWAYS_INLINE bool AddPart(digit_t part);
digit_t stack_parts_[kStackParts];
std::vector<digit_t> heap_parts_;
@@ -371,6 +435,7 @@ class FromStringAccumulator {
Result result_{Result::kOk};
int stack_parts_used_{0};
bool inline_everything_{false};
+ uint8_t radix_{0};
};
// The rest of this file is the inlineable implementation of
@@ -403,6 +468,47 @@ static constexpr uint8_t kCharValue[] = {
25, 26, 27, 28, 29, 30, 31, 32, // 112..119
33, 34, 35, 255, 255, 255, 255, 255, // 120..127 'z' == 122
};
+
+// A space- and time-efficient way to map {2,4,8,16,32} to {1,2,3,4,5}.
+static constexpr uint8_t kCharBits[] = {1, 2, 3, 0, 4, 0, 0, 0, 5};
+
+template <class Char>
+const Char* FromStringAccumulator::ParsePowerTwo(const Char* current,
+ const Char* end,
+ digit_t radix) {
+ radix_ = static_cast<uint8_t>(radix);
+ const int char_bits = kCharBits[radix >> 2];
+ int bits_left;
+ bool done = false;
+ do {
+ digit_t part = 0;
+ bits_left = kDigitBits;
+ while (true) {
+ digit_t d; // Numeric value of the current character {c}.
+ uint32_t c = *current;
+ if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
+ done = true;
+ break;
+ }
+
+ if (bits_left < char_bits) break;
+ bits_left -= char_bits;
+ part = (part << char_bits) | d;
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+ if (!AddPart(part)) return current;
+ } while (!done);
+ // We use the unused {last_multiplier_} field to
+ // communicate how many bits are unused in the last part.
+ last_multiplier_ = bits_left;
+ return current;
+}
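+
+// ParsePowerTwo packs whole characters of {char_bits} bits each into a digit_t,
+// never splitting a character across two parts, and records the number of
+// unused bits of the final part in {last_multiplier_}; {kCharBits} maps the
+// radix (2, 4, 8, 16, 32) to 1..5 bits per character via radix >> 2.
+// A simplified model of the packing, using 8-bit parts for readability
+// (illustration only; V8's digit_t is 32 or 64 bits wide):
+//
+//   std::vector<uint8_t> PackBase8(const std::string& digits) {
+//     const int kPartBits = 8;
+//     const int kCharBits = 3;  // radix 8 -> 3 bits per character
+//     std::vector<uint8_t> parts;
+//     size_t i = 0;
+//     while (i < digits.size()) {
+//       uint8_t part = 0;
+//       int bits_left = kPartBits;
+//       while (i < digits.size() && bits_left >= kCharBits) {
+//         part = (part << kCharBits) | static_cast<uint8_t>(digits[i++] - '0');
+//         bits_left -= kCharBits;
+//       }
+//       parts.push_back(part);  // `bits_left` bits of this part stay unpopulated
+//     }
+//     return parts;
+//   }
+//
+// "1234567" (base 8) becomes parts "12", "34", "56", "7"; the last part holds
+// one character, so 8 - 3 = 5 bits are unused there (the role last_multiplier_
+// plays above).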
+
template <class Char>
const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
digit_t radix) {
@@ -417,12 +523,15 @@ const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
static constexpr int kInlineThreshold = kStackParts * kDigitBits * 100 / 517;
inline_everything_ = (end - start) <= kInlineThreshold;
#endif
+ if (!inline_everything_ && (radix & (radix - 1)) == 0) {
+ return ParsePowerTwo(start, end, radix);
+ }
bool done = false;
do {
digit_t multiplier = 1;
digit_t part = 0;
while (true) {
- digit_t d;
+ digit_t d; // Numeric value of the current character {c}.
uint32_t c = *current;
if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
done = true;
@@ -478,6 +587,10 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
BIGINT_H_DCHECK(max_multiplier_ == 0 || max_multiplier_ == multiplier);
max_multiplier_ = multiplier;
}
+ return AddPart(part);
+}
+
+bool FromStringAccumulator::AddPart(digit_t part) {
if (stack_parts_used_ < kStackParts) {
stack_parts_[stack_parts_used_++] = part;
return true;
@@ -489,7 +602,7 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
heap_parts_.push_back(stack_parts_[i]);
}
}
- if (static_cast<int>(heap_parts_.size()) >= max_digits_ && !is_last) {
+ if (static_cast<int>(heap_parts_.size()) >= max_digits_) {
result_ = Result::kMaxSizeExceeded;
return false;
}
diff --git a/chromium/v8/src/bigint/bitwise.cc b/chromium/v8/src/bigint/bitwise.cc
new file mode 100644
index 00000000000..087847c1181
--- /dev/null
+++ b/chromium/v8/src/bigint/bitwise.cc
@@ -0,0 +1,262 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint-internal.h"
+#include "src/bigint/digit-arithmetic.h"
+#include "src/bigint/util.h"
+#include "src/bigint/vector-arithmetic.h"
+
+namespace v8 {
+namespace bigint {
+
+void BitwiseAnd_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = std::min(X.len(), Y.len());
+ DCHECK(Z.len() >= pairs);
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] & Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) & (-y) == ~(x-1) & ~(y-1)
+ // == ~((x-1) | (y-1))
+ // == -(((x-1) | (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) |
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
+ DCHECK(x_borrow == 0); // NOLINT(readability/check)
+ DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseAnd_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x & (-y) == x & ~(y-1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] & ~digit_sub(Y[i], borrow, &borrow);
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseOr_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = std::min(X.len(), Y.len());
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] | Y[i];
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Y.len(); i++) Z[i] = Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseOr_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) | (-y) == ~(x-1) | ~(y-1)
+ // == ~((x-1) & (y-1))
+ // == -(((x-1) & (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) &
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // Any leftover borrows don't matter, the '&' would drop them anyway.
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = digit_sub(Y[i], borrow, &borrow) & ~X[i];
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
+ DCHECK(borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = X.len();
+ if (Y.len() < X.len()) {
+ std::swap(X, Y);
+ pairs = X.len();
+ }
+ DCHECK(X.len() <= Y.len());
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] ^ Y[i];
+ for (; i < Y.len(); i++) Z[i] = Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) ^
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
+ DCHECK(x_borrow == 0); // NOLINT(readability/check)
+ DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] ^ digit_sub(Y[i], borrow, &borrow);
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
+ DCHECK(borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+namespace {
+
+// Z := (least significant n bits of X).
+void TruncateToNBits(RWDigits Z, Digits X, int n) {
+ int digits = DIV_CEIL(n, kDigitBits);
+ int bits = n % kDigitBits;
+ // Copy all digits except the MSD.
+ int last = digits - 1;
+ for (int i = 0; i < last; i++) {
+ Z[i] = X[i];
+ }
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = X[last];
+ if (bits != 0) {
+ int drop = kDigitBits - bits;
+ msd = (msd << drop) >> drop;
+ }
+ Z[last] = msd;
+}
+
+// Z := 2**n - (least significant n bits of X).
+void TruncateAndSubFromPowerOfTwo(RWDigits Z, Digits X, int n) {
+ int digits = DIV_CEIL(n, kDigitBits);
+ int bits = n % kDigitBits;
+ // Process all digits except the MSD. Take X's digits, then simulate leading
+ // zeroes.
+ int last = digits - 1;
+ int have_x = std::min(last, X.len());
+ digit_t borrow = 0;
+ int i = 0;
+ for (; i < have_x; i++) Z[i] = digit_sub2(0, X[i], borrow, &borrow);
+ for (; i < last; i++) Z[i] = digit_sub(0, borrow, &borrow);
+
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = last < X.len() ? X[last] : 0;
+ if (bits == 0) {
+ Z[last] = digit_sub2(0, msd, borrow, &borrow);
+ } else {
+ int drop = kDigitBits - bits;
+ msd = (msd << drop) >> drop;
+ digit_t minuend_msd = static_cast<digit_t>(1) << bits;
+ digit_t result_msd = digit_sub2(minuend_msd, msd, borrow, &borrow);
+ DCHECK(borrow == 0); // result < 2^n. NOLINT(readability/check)
+ // If all subtracted bits were zero, we have to get rid of the
+ // materialized minuend_msd again.
+ Z[last] = result_msd & (minuend_msd - 1);
+ }
+}
+
+} // namespace
+
+// Returns -1 when the operation would return X unchanged.
+int AsIntNResultLength(Digits X, bool x_negative, int n) {
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ // Generally: decide based on number of digits, and bits in the top digit.
+ if (X.len() < needed_digits) return -1;
+ if (X.len() > needed_digits) return needed_digits;
+ digit_t top_digit = X[needed_digits - 1];
+ digit_t compare_digit = digit_t{1} << ((n - 1) % kDigitBits);
+ if (top_digit < compare_digit) return -1;
+ if (top_digit > compare_digit) return needed_digits;
+ // Special case: if X == -2**(n-1), truncation is a no-op.
+ if (!x_negative) return needed_digits;
+ for (int i = needed_digits - 2; i >= 0; i--) {
+ if (X[i] != 0) return needed_digits;
+ }
+ return -1;
+}
+
+bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n) {
+ DCHECK(X.len() > 0); // NOLINT(readability/check)
+ DCHECK(n > 0); // NOLINT(readability/check)
+ // NOLINTNEXTLINE(readability/check)
+ DCHECK(AsIntNResultLength(X, x_negative, n) > 0);
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ digit_t top_digit = X[needed_digits - 1];
+ digit_t compare_digit = digit_t{1} << ((n - 1) % kDigitBits);
+ // The canonical algorithm would be: convert negative numbers to two's
+ // complement representation, truncate, convert back to sign+magnitude. To
+ // avoid the conversions, we predict what the result would be:
+ // When the (n-1)th bit is not set:
+ // - truncate the absolute value
+ // - preserve the sign.
+ // When the (n-1)th bit is set:
+ // - subtract the truncated absolute value from 2**n to simulate two's
+ // complement representation
+ // - flip the sign, unless it's the special case where the input is negative
+ // and the result is the minimum n-bit integer. E.g. asIntN(3, -12) => -4.
+ bool has_bit = (top_digit & compare_digit) == compare_digit;
+ if (!has_bit) {
+ TruncateToNBits(Z, X, n);
+ return x_negative;
+ }
+ TruncateAndSubFromPowerOfTwo(Z, X, n);
+ if (!x_negative) return true; // Result is negative.
+ // Scan for the special case (see above): if all bits below the (n-1)th
+ // digit are zero, the result is negative.
+ if ((top_digit & (compare_digit - 1)) != 0) return false;
+ for (int i = needed_digits - 2; i >= 0; i--) {
+ if (X[i] != 0) return false;
+ }
+ return true;
+}
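+
+// The prediction above can be cross-checked against ordinary two's-complement
+// truncation on small values. A reference version on plain integers, shown as
+// an illustration only (the assertions include the asIntN(3, -12) => -4
+// example from the comment):
+//
+//   // asIntN(n, v): value of the low n bits of v read as a signed n-bit integer.
+//   int64_t AsIntNReference(int n, int64_t v) {
+//     uint64_t mask = (uint64_t{1} << n) - 1;
+//     uint64_t low = static_cast<uint64_t>(v) & mask;  // two's-complement truncation
+//     uint64_t sign_bit = uint64_t{1} << (n - 1);
+//     return (low & sign_bit) ? static_cast<int64_t>(low) - (int64_t{1} << n)
+//                             : static_cast<int64_t>(low);
+//   }
+//
+//   AsIntNReference(3, -12) == -4   // |{-12}| truncates to 4, bit 2 set: 2^3 - 4
+//   AsIntNReference(3, 5)   == -3   // bit 2 set: 2^3 - 5 = 3, sign flips
+//   AsIntNReference(3, -4)  == -4   // -2^(n-1): the no-op special case
+//   AsIntNReference(3, 3)   ==  3   // bit 2 clear: value and sign unchanged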
+
+// Returns -1 when the operation would return X unchanged.
+int AsUintN_Pos_ResultLength(Digits X, int n) {
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ if (X.len() < needed_digits) return -1;
+ if (X.len() > needed_digits) return needed_digits;
+ int bits_in_top_digit = n % kDigitBits;
+ if (bits_in_top_digit == 0) return -1;
+ digit_t top_digit = X[needed_digits - 1];
+ if ((top_digit >> bits_in_top_digit) == 0) return -1;
+ return needed_digits;
+}
+
+void AsUintN_Pos(RWDigits Z, Digits X, int n) {
+ DCHECK(AsUintN_Pos_ResultLength(X, n) > 0); // NOLINT(readability/check)
+ TruncateToNBits(Z, X, n);
+}
+
+void AsUintN_Neg(RWDigits Z, Digits X, int n) {
+ TruncateAndSubFromPowerOfTwo(Z, X, n);
+}
+
+} // namespace bigint
+} // namespace v8
diff --git a/chromium/v8/src/bigint/fromstring.cc b/chromium/v8/src/bigint/fromstring.cc
index 0307745cad8..a4b34a1a025 100644
--- a/chromium/v8/src/bigint/fromstring.cc
+++ b/chromium/v8/src/bigint/fromstring.cc
@@ -40,7 +40,6 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
// Parts are stored on the heap.
for (int i = 1; i < num_heap_parts - 1; i++) {
MultiplySingle(Z, already_set, max_multiplier);
- if (should_terminate()) return;
Add(Z, accumulator->heap_parts_[i]);
already_set.set_len(already_set.len() + 1);
}
@@ -48,6 +47,262 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
Add(Z, accumulator->heap_parts_.back());
}
+// The fast algorithm: combine parts in a balanced-binary-tree like order:
+// Multiply-and-add neighboring pairs of parts, then loop, until only one
+// part is left. The benefit is that the multiplications will have inputs of
+// similar sizes, which makes them amenable to fast multiplication algorithms.
+// We have to do more multiplications than the classic algorithm though,
+// because we also have to multiply the multipliers.
+// Optimizations:
+// - We can skip the multiplier for the first part, because we never need it.
+// - Most multipliers are the same; we can avoid repeated multiplications and
+// just copy the previous result. (In theory we could even de-dupe them, but
+// as the parts/multipliers grow, we'll need most of the memory anyway.)
+// Copied results are marked with a * below.
+// - We can re-use memory using a system of three buffers whose usage rotates:
+// - one is considered empty, and is overwritten with the new parts,
+// - one holds the multipliers (and will be "empty" in the next round), and
+// - one initially holds the parts and is overwritten with the new multipliers
+// Parts and multipliers both grow in each iteration, and get fewer, so we
+// use the space of two adjacent old chunks for one new chunk.
+// Since the {heap_parts_} vector has the right size, and so does the
+// result {Z}, we can use that memory, and only need to allocate one scratch
+// vector. If the final result ends up in the wrong bucket, we have to copy it
+// to the correct one.
+// - We don't have to keep track of the positions and sizes of the chunks,
+// because we can deduce their precise placement from the iteration index.
+//
+// Example, assuming digit_t is 4 bits, fitting one decimal digit:
+// Initial state:
+// parts_: 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// multipliers_: 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
+// After the first iteration of the outer loop:
+// parts: 12 34 56 78 90 12 34 5
+// multipliers: 100 *100 *100 *100 *100 *100 10
+// After the second iteration:
+// parts: 1234 5678 9012 345
+// multipliers: 10000 *10000 1000
+// After the third iteration:
+// parts: 12345678 9012345
+// multipliers: 10000000
+// And then there's an obvious last iteration.
+void ProcessorImpl::FromStringLarge(RWDigits Z,
+ FromStringAccumulator* accumulator) {
+ int num_parts = static_cast<int>(accumulator->heap_parts_.size());
+ DCHECK(num_parts >= 2); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ RWDigits parts(accumulator->heap_parts_.data(), num_parts);
+ Storage multipliers_storage(num_parts);
+ RWDigits multipliers(multipliers_storage.get(), num_parts);
+ RWDigits temp(Z, 0, num_parts);
+ // Unrolled and specialized first iteration: part_len == 1, so instead of
+ // Digits sub-vectors we have individual digit_t values, and the multipliers
+ // are known up front.
+ {
+ digit_t max_multiplier = accumulator->max_multiplier_;
+ digit_t last_multiplier = accumulator->last_multiplier_;
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ digit_t p_in = parts[i];
+ digit_t p_in2 = parts[i + 1];
+ digit_t m_in = max_multiplier;
+ digit_t m_in2 = i == num_parts - 2 ? last_multiplier : max_multiplier;
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ digit_t p_high;
+ digit_t p_low = digit_mul(p_in, m_in2, &p_high);
+ digit_t carry;
+ new_parts[i] = digit_add2(p_low, p_in2, &carry);
+ new_parts[i + 1] = p_high + carry;
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ if (i > 2 && m_in2 != last_multiplier) {
+ new_multipliers[i] = new_multipliers[i - 2];
+ new_multipliers[i + 1] = new_multipliers[i - 1];
+ } else {
+ digit_t m_high;
+ new_multipliers[i] = digit_mul(m_in, m_in2, &m_high);
+ new_multipliers[i + 1] = m_high;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ new_parts[i] = parts[i];
+ new_multipliers[i] = last_multiplier;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ AddWorkEstimate(num_parts);
+ }
+ int part_len = 2;
+
+ // Remaining iterations.
+ while (num_parts > 1) {
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int new_part_len = part_len * 2;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ int start = i * part_len;
+ Digits p_in(parts, start, part_len);
+ Digits p_in2(parts, start + part_len, part_len);
+ Digits m_in(multipliers, start, part_len);
+ Digits m_in2(multipliers, start + part_len, part_len);
+ RWDigits p_out(new_parts, start, new_part_len);
+ RWDigits m_out(new_multipliers, start, new_part_len);
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ Multiply(p_out, p_in, m_in2);
+ if (should_terminate()) return;
+ digit_t overflow = AddAndReturnOverflow(p_out, p_in2);
+ DCHECK(overflow == 0); // NOLINT(readability/check)
+ USE(overflow);
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ bool copied = false;
+ if (i > 2) {
+ int prev_start = (i - 2) * part_len;
+ Digits m_in_prev(multipliers, prev_start, part_len);
+ Digits m_in2_prev(multipliers, prev_start + part_len, part_len);
+ if (Compare(m_in, m_in_prev) == 0 &&
+ Compare(m_in2, m_in2_prev) == 0) {
+ copied = true;
+ Digits m_out_prev(new_multipliers, prev_start, new_part_len);
+ for (int k = 0; k < new_part_len; k++) m_out[k] = m_out_prev[k];
+ }
+ }
+ if (!copied) {
+ Multiply(m_out, m_in, m_in2);
+ if (should_terminate()) return;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ Digits p_in(parts, i * part_len, part_len);
+ Digits m_in(multipliers, i * part_len, part_len);
+ RWDigits p_out(new_parts, i * part_len, new_part_len);
+ RWDigits m_out(new_multipliers, i * part_len, new_part_len);
+ int k = 0;
+ for (; k < p_in.len(); k++) p_out[k] = p_in[k];
+ for (; k < p_out.len(); k++) p_out[k] = 0;
+ k = 0;
+ for (; k < m_in.len(); k++) m_out[k] = m_in[k];
+ for (; k < m_out.len(); k++) m_out[k] = 0;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ part_len = new_part_len;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ }
+ // Copy the result to Z, if it doesn't happen to be there already.
+ if (parts.digits() != Z.digits()) {
+ int i = 0;
+ for (; i < parts.len(); i++) Z[i] = parts[i];
+ // Z might be bigger than we requested; be robust towards that.
+ for (; i < Z.len(); i++) Z[i] = 0;
+ }
+}
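+
+// The balanced pairing can be modelled on small integers to see that it
+// reproduces the classic left-to-right accumulation while keeping both
+// multiplication operands of similar size. A toy version with one decimal
+// digit per initial part (illustration only; the real code works on digit
+// vectors, rotates three buffers, and deduplicates equal multipliers):
+//
+//   // Fold neighbouring (part, multiplier) pairs: p' = p[i] * m[i+1] + p[i+1],
+//   // m' = m[i] * m[i+1]; an odd trailing pair is carried over unchanged.
+//   uint64_t CombineBalanced(std::vector<uint64_t> parts,
+//                            std::vector<uint64_t> mults) {
+//     while (parts.size() > 1) {
+//       std::vector<uint64_t> new_parts, new_mults;
+//       size_t i = 0;
+//       for (; i + 1 < parts.size(); i += 2) {
+//         new_parts.push_back(parts[i] * mults[i + 1] + parts[i + 1]);
+//         new_mults.push_back(mults[i] * mults[i + 1]);
+//       }
+//       if (i < parts.size()) {  // odd number of parts: keep the last one as-is
+//         new_parts.push_back(parts[i]);
+//         new_mults.push_back(mults[i]);
+//       }
+//       parts = std::move(new_parts);
+//       mults = std::move(new_mults);
+//     }
+//     return parts[0];
+//   }
+//
+//   // "1234567" split into single decimal digits, each carrying multiplier 10:
+//   CombineBalanced({1, 2, 3, 4, 5, 6, 7},
+//                   {10, 10, 10, 10, 10, 10, 10}) == 1234567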
+
+// Specialized algorithms for power-of-two radixes. Designed to work with
+// {ParsePowerTwo}: {max_multiplier_} isn't saved, but {radix_} is, and
+// {last_multiplier_} has special meaning, namely the number of unpopulated bits
+// in the last part.
+// For these radixes, {parts} already is a list of correct bit sequences, we
+// just have to put them together in the right way:
+// - The parts are currently in reversed order. The highest-index parts[i]
+// will go into Z[0].
+// - All parts, possibly except for the last, are maximally populated.
+// - A maximally populated part stores a non-fractional number of characters,
+//   i.e. the largest multiple of {char_bits} that fits in a part is populated.
+// - The populated bits in a part are at the low end.
+// - The number of unused bits in the last part is stored in
+// {accumulator->last_multiplier_}.
+//
+// Example: Given the following parts vector, where letters are used to
+// label bits, bit order is big endian (i.e. [00000101] encodes "5"),
+// 'x' means "unpopulated", kDigitBits == 8, radix == 8, and char_bits == 3:
+//
+// parts[0] -> [xxABCDEF][xxGHIJKL][xxMNOPQR][xxxxxSTU] <- parts[3]
+//
+// We have to assemble the following result:
+//
+// Z[0] -> [NOPQRSTU][FGHIJKLM][xxxABCDE] <- Z[2]
+//
+void ProcessorImpl::FromStringBasePowerOfTwo(
+ RWDigits Z, FromStringAccumulator* accumulator) {
+ const int num_parts = accumulator->ResultLength();
+ DCHECK(num_parts >= 1); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ Digits parts(accumulator->heap_parts_.size() > 0
+ ? accumulator->heap_parts_.data()
+ : accumulator->stack_parts_,
+ num_parts);
+ uint8_t radix = accumulator->radix_;
+ DCHECK(radix == 2 || radix == 4 || radix == 8 || radix == 16 || radix == 32);
+ const int char_bits = BitLength(radix - 1);
+ const int unused_last_part_bits =
+ static_cast<int>(accumulator->last_multiplier_);
+ const int unused_part_bits = kDigitBits % char_bits;
+ const int max_part_bits = kDigitBits - unused_part_bits;
+ int z_index = 0;
+ int part_index = num_parts - 1;
+
+ // If the last part is fully populated, then all parts must be, and we can
+ // simply copy them (in reversed order).
+ if (unused_last_part_bits == 0) {
+ DCHECK(kDigitBits % char_bits == 0); // NOLINT(readability/check)
+ while (part_index >= 0) {
+ Z[z_index++] = parts[part_index--];
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+ return;
+ }
+
+ // Otherwise we have to shift parts contents around as needed.
+ // Holds the next Z digit that we want to store...
+ digit_t digit = parts[part_index--];
+ // ...and the number of bits (at the right end) we already know.
+ int digit_bits = kDigitBits - unused_last_part_bits;
+ while (part_index >= 0) {
+ // Holds the last part that we read from {parts}...
+ digit_t part;
+ // ...and the number of bits (at the right end) that we haven't used yet.
+ int part_bits;
+ while (digit_bits < kDigitBits) {
+ part = parts[part_index--];
+ part_bits = max_part_bits;
+ digit |= part << digit_bits;
+ int part_shift = kDigitBits - digit_bits;
+ if (part_shift > part_bits) {
+ digit_bits += part_bits;
+ part = 0;
+ part_bits = 0;
+ if (part_index < 0) break;
+ } else {
+ digit_bits = kDigitBits;
+ part >>= part_shift;
+ part_bits -= part_shift;
+ }
+ }
+ Z[z_index++] = digit;
+ digit = part;
+ digit_bits = part_bits;
+ }
+ if (digit_bits > 0) {
+ Z[z_index++] = digit;
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+}
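+
+// The example in the comment can also be checked at the value level:
+// concatenating the populated bits of the parts (most significant part first)
+// and re-slicing into full digits must give the little-endian digits of the
+// parsed number. A small check for radix 8 with 8-bit digits, reusing the
+// packing shown next to ParsePowerTwo (illustration only; this function does
+// the equivalent with the shift loop instead of a wide integer):
+//
+//   const int kDigitBits = 8, kCharBits = 3;
+//   // Parts for "1234567" (base 8): two characters per part, last part holds one.
+//   std::vector<uint8_t> parts = {012, 034, 056, 07};  // octal literals
+//   const int unused_last_part_bits = kDigitBits - 1 * kCharBits;  // == 5
+//
+//   uint64_t value = 0;
+//   for (size_t i = 0; i < parts.size(); ++i) {
+//     int populated = kDigitBits - (kDigitBits % kCharBits);  // full parts: 6 bits
+//     if (i + 1 == parts.size()) populated = kDigitBits - unused_last_part_bits;
+//     value = (value << populated) | parts[i];
+//   }
+//   std::vector<uint8_t> Z;
+//   for (uint64_t v = value; v != 0; v >>= kDigitBits) Z.push_back(v & 0xff);
+//
+//   assert(value == 01234567);                        // == 342391
+//   assert((Z == std::vector<uint8_t>{0x77, 0x39, 0x05}));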
+
void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
if (accumulator->inline_everything_) {
int i = 0;
@@ -57,8 +312,12 @@ void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
for (; i < Z.len(); i++) Z[i] = 0;
} else if (accumulator->stack_parts_used_ == 0) {
for (int i = 0; i < Z.len(); i++) Z[i] = 0;
- } else {
+ } else if (IsPowerOfTwo(accumulator->radix_)) {
+ FromStringBasePowerOfTwo(Z, accumulator);
+ } else if (accumulator->ResultLength() < kFromStringLargeThreshold) {
FromStringClassic(Z, accumulator);
+ } else {
+ FromStringLarge(Z, accumulator);
}
}
diff --git a/chromium/v8/src/bigint/mul-fft.cc b/chromium/v8/src/bigint/mul-fft.cc
index a3971a72764..9c297c00dfc 100644
--- a/chromium/v8/src/bigint/mul-fft.cc
+++ b/chromium/v8/src/bigint/mul-fft.cc
@@ -144,7 +144,7 @@ void ShiftModFn_Large(digit_t* result, const digit_t* input, int digit_shift,
result[digit_shift] = digit_sub(sum, i0_part, &borrow);
input_carry = d >> (kDigitBits - bits_shift);
if (digit_shift + 1 < K) {
- digit_t d = input[1];
+ d = input[1];
digit_t subtrahend = (d << bits_shift) | input_carry;
result[digit_shift + 1] =
digit_sub2(iK_carry, subtrahend, borrow, &borrow);
diff --git a/chromium/v8/src/bigint/vector-arithmetic.cc b/chromium/v8/src/bigint/vector-arithmetic.cc
index 4191755bc98..9bbea3873ea 100644
--- a/chromium/v8/src/bigint/vector-arithmetic.cc
+++ b/chromium/v8/src/bigint/vector-arithmetic.cc
@@ -118,5 +118,22 @@ bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
return !x_negative;
}
+void AddOne(RWDigits Z, Digits X) {
+ digit_t carry = 1;
+ int i = 0;
+ for (; carry > 0 && i < X.len(); i++) Z[i] = digit_add2(X[i], carry, &carry);
+ if (carry > 0) Z[i++] = carry;
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void SubtractOne(RWDigits Z, Digits X) {
+ digit_t borrow = 1;
+ int i = 0;
+ for (; borrow > 0; i++) Z[i] = digit_sub(X[i], borrow, &borrow);
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
} // namespace bigint
} // namespace v8
diff --git a/chromium/v8/src/builtins/accessors.cc b/chromium/v8/src/builtins/accessors.cc
index 8d262592048..0d994d2d034 100644
--- a/chromium/v8/src/builtins/accessors.cc
+++ b/chromium/v8/src/builtins/accessors.cc
@@ -17,7 +17,6 @@
#include "src/objects/contexts.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/prototype.h"
diff --git a/chromium/v8/src/builtins/accessors.h b/chromium/v8/src/builtins/accessors.h
index 0148b8e3d18..27ff2768211 100644
--- a/chromium/v8/src/builtins/accessors.h
+++ b/chromium/v8/src/builtins/accessors.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_ACCESSORS_H_
#define V8_BUILTINS_ACCESSORS_H_
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/objects/property-details.h"
diff --git a/chromium/v8/src/builtins/arm/builtins-arm.cc b/chromium/v8/src/builtins/arm/builtins-arm.cc
index f45c927e675..1b06f6f0a02 100644
--- a/chromium/v8/src/builtins/arm/builtins-arm.cc
+++ b/chromium/v8/src/builtins/arm/builtins-arm.cc
@@ -76,6 +76,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch));
+ UseScratchRegisterScope temps(masm);
+ Register counter = scratch;
+ Register value = temps.Acquire();
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ ldr(value, MemOperand(value));
+ }
+ __ push(value);
+ __ bind(&entry);
+ __ sub(counter, counter, Operand(1), SetCC);
+ __ b(ge, &loop);
+}
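+
+// This helper replaces the previous PushArray call and open-coded loops: it
+// pushes the arguments from the highest index down to 0, dereferences each
+// slot first when the elements are handles, and drops the receiver slot when
+// argc already includes it. A loose C++ model, assuming a single receiver
+// slot, uintptr_t stack slots, and a vector standing in for the machine stack
+// (illustration only):
+//
+//   enum class ElementType { kRaw, kHandle };
+//
+//   void PushArgumentsModel(std::vector<uintptr_t>* stack, const uintptr_t* array,
+//                           int argc, bool argc_includes_receiver,
+//                           ElementType type) {
+//     int count = argc_includes_receiver ? argc - 1 : argc;  // skip receiver slot
+//     for (int i = count - 1; i >= 0; --i) {
+//       uintptr_t value = array[i];
+//       if (type == ElementType::kHandle) {
+//         value = *reinterpret_cast<const uintptr_t*>(value);  // dereference handle
+//       }
+//       stack->push_back(value);  // last value pushed ends up on top of the stack
+//     }
+//   }
+//
+//   // With raw elements {10, 20, 30} and argc == 3 (no receiver slot), the
+//   // model pushes 30, 20, 10 in that order, so array[0] lands on top.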
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -106,12 +136,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
- // Set up pointer to last argument (skip receiver).
+ // Set up pointer to first argument (skip receiver).
__ add(
r4, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
// Copy arguments and receiver to the expression stack.
- __ PushArray(r4, r0, r5);
+ // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +162,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -230,7 +264,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(r4, r0, r5);
+  // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r6);
@@ -276,7 +312,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -308,14 +346,32 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ b(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ b(ne, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ b(eq, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ b(eq, is_baseline);
+ }
__ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ b(ne, &done);
__ ldr(sfi_data,
@@ -383,6 +439,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldrh(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
+ }
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -611,6 +670,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
+ USE(pushed_stack_space);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
@@ -705,7 +765,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments + receiver.
// Clobbers r5.
Label enough_stack_space, stack_overflow;
- __ add(r6, r0, Operand(1)); // Add one for receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ mov(r6, r0);
+ } else {
+ __ add(r6, r0, Operand(1)); // Add one for receiver.
+ }
__ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -715,24 +779,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// r1: new.target
// r2: function
// r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
- // r6 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r5, MemOperand(r6, -kSystemPointerSize,
- PreIndex)); // read next parameter
- __ ldr(r5, MemOperand(r5)); // dereference handle
- __ push(r5); // push parameter
- __ bind(&entry);
- __ cmp(r4, r6);
- __ b(ne, &loop);
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r3);
@@ -815,7 +868,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
- __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1196,7 +1251,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// o r0: actual argument count (not including the receiver)
+// o r0: actual argument count
// o r1: the JS function object being called.
// o r3: the incoming new target or generator object
// o cp: our context
@@ -1414,8 +1469,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ ldr(r2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ mov(r2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
__ JumpCodeObject(r2);
@@ -1451,7 +1505,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1464,15 +1518,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r0, r0, Operand(1));
}
- __ add(r3, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r3, r4, &stack_overflow);
-
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver. Argument count is correct.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ sub(r3, r0, Operand(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
+ __ add(r3, r0, Operand(1));
+ } else {
__ mov(r3, r0);
}
+ __ StackOverflowCheck(r3, r4, &stack_overflow);
+
// Push the arguments. r2 and r4 will be modified.
GenerateInterpreterPushArgs(masm, r3, r2, r4);
@@ -1510,7 +1567,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- r0 : argument count (not including receiver)
+ // -- r0 : argument count
// -- r3 : new target
// -- r1 : constructor to call
// -- r2 : allocation site feedback if available, undefined otherwise.
@@ -1518,17 +1575,20 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
- __ add(r5, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r5, r6, &stack_overflow);
+ __ StackOverflowCheck(r0, r6, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
}
+ Register argc_without_receiver = r0;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r6;
+ __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
+ }
// Push the arguments. r4 and r5 will be modified.
- GenerateInterpreterPushArgs(masm, r0, r4, r5);
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
@@ -1729,10 +1789,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ add(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ constexpr int return_value_offset =
+ BuiltinContinuationFrameConstants::kFixedSlotCount -
+ kJSArgcReceiverSlots;
+ __ add(r0, r0, Operand(return_value_offset));
__ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Recover arguments count.
- __ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ sub(r0, r0, Operand(return_value_offset));
}
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1815,7 +1878,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
+ __ ldr(r1,
+ FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -1857,12 +1921,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
__ ldr(r1, MemOperand(sp, 0)); // receiver
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1888,7 +1954,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1902,7 +1968,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
{
Label done;
- __ cmp(r0, Operand::Zero());
+ __ cmp(r0, Operand(JSParameterCount(0)));
__ b(ne, &done);
__ PushRoot(RootIndex::kUndefinedValue);
__ add(r0, r0, Operand(1));
@@ -1932,14 +1998,16 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1974,15 +2042,17 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
__ mov(r4, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
__ mov(r3, r1); // new.target defaults to target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r4, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2005,13 +2075,55 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ UseScratchRegisterScope temps(masm);
+ Register old_sp = scratch1;
+ Register new_space = scratch2;
+ __ mov(old_sp, sp);
+ __ lsl(new_space, count, Operand(kSystemPointerSizeLog2));
+ __ AllocateStackSpace(new_space);
+
+ Register end = scratch2;
+ Register value = temps.Acquire();
+ Register dest = pointer_to_new_space_out;
+ __ mov(dest, sp);
+ __ add(end, old_sp, Operand(argc_in_out, LSL, kSystemPointerSizeLog2));
+ Label loop, done;
+ __ bind(&loop);
+ __ cmp(old_sp, end);
+ if (kJSArgcIncludesReceiver) {
+ __ b(ge, &done);
+ } else {
+ __ b(gt, &done);
+ }
+ __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
+ __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
+ __ b(&loop);
+ __ bind(&done);
+
+ // Update total number of arguments.
+ __ add(argc_in_out, argc_in_out, count);
+}
+
+} // namespace
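+
+// Conceptually the helper grows the stack by |count| slots, slides the
+// |argc_in_out| slots already at the top down into the new space, and hands
+// back a pointer to the gap this opens just below them, so the caller can copy
+// the extra arguments there; |argc_in_out| afterwards covers both. A loose
+// model using a vector whose front is the stack top (illustration only; the
+// generated code moves sp and copies with post-indexed loads/stores):
+//
+//   // Returns the index of the first free slot of the newly opened gap.
+//   size_t AllocateSpaceAndShiftModel(std::vector<uintptr_t>* stack,
+//                                     size_t count, size_t* argc_in_out) {
+//     // Existing *argc_in_out slots stay at the top; `count` empty slots follow.
+//     stack->insert(stack->begin() + static_cast<std::ptrdiff_t>(*argc_in_out),
+//                   count, uintptr_t{0});
+//     size_t first_free_slot = *argc_in_out;
+//     *argc_in_out += count;  // total argument count now includes the new slots
+//     return first_free_slot;
+//   }
+//
+//   // Starting from a stack {111, 222, 333} with argc == 3, requesting 2 slots
+//   // yields argc == 5, gap index 3, and stack {111, 222, 333, 0, 0}.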
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r1 : target
- // -- r0 : number of parameters on the stack (not including the receiver)
+ // -- r0 : number of parameters on the stack
// -- r2 : arguments list (a FixedArray)
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
@@ -2042,23 +2154,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r4, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r4: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r4, r0, r9, r5, r6);
// Copy arguments onto the stack (thisArgument is already on the stack).
{
@@ -2077,7 +2176,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
- __ add(r0, r0, r6);
}
// Tail-call to the actual Call or Construct builtin.
@@ -2092,7 +2190,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r3 : the new.target (for [[Construct]] calls)
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
@@ -2120,12 +2218,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
+ }
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments already in the stack (not including the
- // receiver)
+ // -- r0 : the number of arguments already in the stack
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
// -- r3 : the new.target (for [[Construct]] calls)
@@ -2145,30 +2245,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r8, src = r9,
- dest = r2; // r7 and r10 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r5, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r5: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r2: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r5, r0, r2, scratch,
+ r8);
+
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
- __ add(r0, r0, r5);
__ bind(&loop);
{
__ sub(r5, r5, Operand(1), SetCC);
@@ -2191,13 +2278,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r1);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
@@ -2216,7 +2301,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ b(ne, &done_convert);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2268,7 +2353,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2292,7 +2377,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
ASM_CODE_COMMENT(masm);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2306,7 +2391,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ b(eq, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r2 : the [[BoundArguments]] (implemented as FixedArray)
// -- r3 : new.target (only in case of [[Construct]])
@@ -2370,7 +2455,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
@@ -2391,37 +2476,51 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r1, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r4, r1);
- __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r0;
+ Register target = r1;
+ Register map = r4;
+ Register instance_type = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, ls);
- __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(Map::Bits1::IsCallableBit::kMask));
- __ b(eq, &non_callable);
+ {
+ Register flags = r4;
+ __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ b(eq, &non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ cmp(r5, Operand(JS_PROXY_TYPE));
+ __ cmp(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ b(eq, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
  // Overwrite the original receiver with the (original) target.
- __ str(r1, __ ReceiverOperand(r0));
+ __ str(target, __ ReceiverOperand(argc));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2430,15 +2529,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (checked to be a JSFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2468,7 +2577,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2491,36 +2600,45 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (can be any Object)
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r0;
+ Register target = r1;
+ Register map = r4;
+ Register instance_type = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r1, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r2, Operand(Map::Bits1::IsConstructorBit::kMask));
- __ b(eq, &non_constructor);
+ __ ldr(map, FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = r2;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(flags, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ b(eq, &non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ cmp(r5, Operand(JS_PROXY_TYPE));
+ __ cmp(instance_type, Operand(JS_PROXY_TYPE));
__ b(ne, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2529,9 +2647,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ str(r1, __ ReceiverOperand(r0));
+ __ str(target, __ ReceiverOperand(argc));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2777,12 +2896,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3504,7 +3617,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -3517,13 +3630,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ ldr(code_obj,
- FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r3);
+ }
// Load the feedback vector.
Register feedback_vector = r2;
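
The arm changes above all hinge on one convention switch: when kJSArgcIncludesReceiver is set, the count in r0 (and in the frame's kArgCOffset slot) covers the receiver slot as well. A minimal plain-C++ sketch of how the constants threaded through these hunks relate, assuming definitions along the lines the patch implies (the real ones live elsewhere in the tree and are not part of this diff):

// Sketch only, not the verbatim V8 definitions.
constexpr bool kJSArgcIncludesReceiver = true;  // build-time switch
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// JSParameterCount(n): the argc value to expect when the caller passed n
// real (non-receiver) parameters; used in the cmp(r0, ...) checks above.
constexpr int JSParameterCount(int params_without_receiver) {
  return params_without_receiver + kJSArgcReceiverSlots;
}

For example, Reflect.apply's check cmp(r0, Operand(JSParameterCount(1))) asks "was at least the target supplied?": it compares against 2 when the receiver is counted in argc and against 1 otherwise.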
diff --git a/chromium/v8/src/builtins/arm64/builtins-arm64.cc b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
index b1f9a63e3c7..6b5a3f8ecca 100644
--- a/chromium/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
@@ -112,10 +112,12 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(x11, argc);
__ Push(x11, padreg);
- // Add a slot for the receiver, and round up to maintain alignment.
+ // Add a slot for the receiver (if not already included), and round up to
+ // maintain alignment.
Register slot_count = x2;
Register slot_count_without_rounding = x12;
- __ Add(slot_count_without_rounding, argc, 2);
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slot_count_without_rounding, argc, additional_slots);
__ Bic(slot_count, slot_count_without_rounding, 1);
__ Claim(slot_count);
@@ -128,7 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
- __ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
+ __ Str(padreg,
+ MemOperand(x2, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
__ Bind(&already_aligned);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
@@ -148,7 +151,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
- __ Mov(count, argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(count, argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(count, argc);
+ }
__ CopyDoubleWords(dst, src, count);
}
@@ -190,7 +197,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@@ -311,6 +320,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Round the number of arguments down to the next even number, and claim
// slots for the arguments. If the number of arguments was odd, the last
// argument will overwrite one of the receivers pushed above.
+ Register argc_without_receiver = x12;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = x11;
+ __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
+ }
__ Bic(x10, x12, 1);
// Check if we have enough stack space to push all arguments.
@@ -328,7 +342,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Register count = x2;
Register dst = x10;
Register src = x11;
- __ Mov(count, x12);
+ __ Mov(count, argc_without_receiver);
__ Poke(x0, 0); // Add the receiver.
__ SlotAddress(dst, 1); // Skip receiver.
__ Add(src, fp,
@@ -374,7 +388,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@@ -414,6 +430,21 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -422,8 +453,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ B(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ B(ne, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ B(eq, is_baseline);
+ __ Bind(&not_baseline);
+ } else {
+ __ B(eq, is_baseline);
+ }
__ Cmp(scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ LoadTaggedPointerField(
@@ -485,12 +529,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(x10, x10, kJSArgcReceiverSlots);
+ }
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
__ Bic(x11, x11, 1);
__ Claim(x11);
- // Store padding (which might be replaced by the receiver).
+ // Store padding (which might be replaced by the last argument).
__ Sub(x11, x11, 1);
__ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
@@ -855,9 +902,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
masm->isolate()));
__ Ldr(cp, MemOperand(scratch));
- // Claim enough space for the arguments, the receiver and the function,
- // including an optional slot of padding.
- __ Add(slots_to_claim, argc, 3);
+ // Claim enough space for the arguments, the function and the receiver (if
+ // it is not included in argc already), including an optional slot of
+ // padding.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 2 : 3;
+ __ Add(slots_to_claim, argc, additional_slots);
__ Bic(slots_to_claim, slots_to_claim, 1);
// Check if we have enough stack space to push all arguments.
@@ -880,7 +929,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
- __ Str(function, MemOperand(scratch, kSystemPointerSize));
+ __ Str(
+ function,
+ MemOperand(scratch, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -888,7 +939,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, done;
// Skip the argument set up if we have no arguments.
- __ Cbz(argc, &done);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(eq, &done);
+ } else {
+ __ Cbz(argc, &done);
+ }
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
@@ -902,7 +958,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
- __ B(le, &loop);
+ if (kJSArgcIncludesReceiver) {
+ __ B(lt, &loop);
+ } else {
+ __ B(le, &loop);
+ }
__ Bind(&done);
@@ -992,7 +1052,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
- __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1378,7 +1440,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// - x0: actual argument count (not including the receiver)
+// - x0: actual argument count
// - x1: the JS function object being called.
// - x3: the incoming new target or generator object
// - cp: our context.
@@ -1614,9 +1676,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(x2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
__ JumpCodeTObject(x2);
@@ -1643,7 +1703,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register last_arg_addr = x10;
Register stack_addr = x11;
Register slots_to_claim = x12;
- Register slots_to_copy = x13; // May include receiver, unlike num_args.
+ Register slots_to_copy = x13;
DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
slots_to_claim, slots_to_copy));
@@ -1651,15 +1711,17 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
slots_to_copy));
- // Add one slot for the receiver.
- __ Add(slots_to_claim, num_args, 1);
-
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Exclude final spread from slots to claim and the number of arguments.
- __ Sub(slots_to_claim, slots_to_claim, 1);
__ Sub(num_args, num_args, 1);
}
+ // Add receiver (if not already included in argc) and round up to an even
+ // number of slots.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slots_to_claim, num_args, additional_slots);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
+
// Add a stack check before pushing arguments.
Label stack_overflow, done;
__ StackOverflowCheck(slots_to_claim, &stack_overflow);
@@ -1669,9 +1731,6 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Unreachable();
__ Bind(&done);
- // Round up to an even number of slots and claim them.
- __ Add(slots_to_claim, slots_to_claim, 1);
- __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
{
@@ -1682,15 +1741,16 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ Mov(slots_to_copy, num_args);
- __ SlotAddress(stack_addr, 1);
- } else {
- // If we're not given an explicit receiver to store, we'll need to copy it
- // together with the rest of the arguments.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
__ Add(slots_to_copy, num_args, 1);
- __ SlotAddress(stack_addr, 0);
+ } else {
+ __ Mov(slots_to_copy, num_args);
}
+ __ SlotAddress(stack_addr, skip_receiver ? 1 : 0);
__ Sub(last_arg_addr, first_arg_index,
Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
@@ -1718,7 +1778,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1749,7 +1809,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- x0 : argument count (not including receiver)
+ // -- x0 : argument count
// -- x3 : new target
// -- x1 : constructor to call
// -- x2 : allocation site feedback if available, undefined otherwise
@@ -1975,16 +2035,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point. r0 contains the arguments count, the return value
+ // the LAZY deopt point. x0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ add(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ constexpr int return_offset =
+ BuiltinContinuationFrameConstants::kCallerSPOffset /
+ kSystemPointerSize -
+ kJSArgcReceiverSlots;
+ __ add(x0, x0, return_offset);
__ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2));
// Recover argument count.
- __ sub(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ __ sub(x0, x0, return_offset);
}
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -2078,7 +2138,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- x1, FieldMemOperand(x0, Code::kDeoptimizationDataOffset));
+ x1,
+ FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2133,14 +2194,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Mov(this_arg, undefined_value);
__ Mov(arg_array, undefined_value);
__ Peek(receiver, 0);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(this_arg, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@@ -2167,7 +2230,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ Bind(&no_arguments);
{
- __ Mov(x0, 0);
+ __ Mov(x0, JSParameterCount(0));
DCHECK_EQ(receiver, x1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2187,7 +2250,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label non_zero;
Register scratch = x10;
- __ Cbnz(argc, &non_zero);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(gt, &non_zero);
+ } else {
+ __ Cbnz(argc, &non_zero);
+ }
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Overwrite receiver with undefined, which will be the new receiver.
// We do not need to overwrite the padding slot above it with anything.
@@ -2205,8 +2273,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
- __ Mov(count, argc); // CopyDoubleWords changes the count argument.
- __ Tbz(argc, 0, &even);
+ UseScratchRegisterScope temps(masm);
+ Register argc_without_receiver = argc;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = temps.AcquireX();
+ __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
+ }
+ // CopyDoubleWords changes the count argument.
+ __ Mov(count, argc_without_receiver);
+ __ Tbz(argc_without_receiver, 0, &even);
// Shift arguments one slot down on the stack (overwriting the original
// receiver).
@@ -2214,7 +2289,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Sub(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count);
// Overwrite the duplicated remaining last argument.
- __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Poke(padreg, Operand(argc_without_receiver, LSL, kXRegSizeLog2));
__ B(&arguments_ready);
// Copy arguments one slot higher in memory, overwriting the original
@@ -2261,17 +2336,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(this_argument, undefined_value);
__ Mov(arguments_list, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(this_argument, 2 * kSystemPointerSize);
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@@ -2317,19 +2394,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(arguments_list, undefined_value);
__ Mov(new_target, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arguments_list, 2 * kSystemPointerSize);
__ Mov(new_target, target); // new.target defaults to target
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
@@ -2365,19 +2444,25 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register slots_to_copy = x10;
Register slots_to_claim = x12;
- __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ Mov(slots_to_copy, argc);
+ } else {
+ __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ }
__ Mov(slots_to_claim, len);
__ Tbz(slots_to_claim, 0, &even);
- // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
- // one extra padding slot. If argc is odd, we know that the original arguments
- // will have a padding slot we can reuse (since len is odd), so
- // slots_to_claim = len - 1.
+ // Claim space we need. If argc (without receiver) is even, slots_to_claim =
+ // len + 1, as we need one extra padding slot. If argc (without receiver) is
+ // odd, we know that the original arguments will have a padding slot we can
+ // reuse (since len is odd), so slots_to_claim = len - 1.
{
Register scratch = x11;
__ Add(slots_to_claim, len, 1);
__ And(scratch, argc, 1);
- __ Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ __ Eor(scratch, scratch, 1);
+ }
__ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2404,7 +2489,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : target
- // -- x0 : number of parameters on the stack (not including the receiver)
+ // -- x0 : number of parameters on the stack
// -- x2 : arguments list (a FixedArray)
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
@@ -2455,8 +2540,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
Register dst = x16;
- __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
- __ SlotAddress(dst, dst);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
+ __ SlotAddress(dst, dst);
+ }
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
@@ -2479,7 +2568,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
// -- x1 : the target to call (can be any Object)
// -- x2 : start index (to support rest parameters)
@@ -2510,6 +2599,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register len = x6;
Label stack_done, stack_overflow;
__ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Subs(len, len, kJSArgcReceiverSlots);
+ }
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
@@ -2527,8 +2619,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ lsl(start_index, start_index, kSystemPointerSizeLog2);
__ Add(args_fp, args_fp, start_index);
// Point to the position to copy to.
- __ Add(x10, argc, 1);
- __ SlotAddress(dst, x10);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(x10, argc, 1);
+ __ SlotAddress(dst, x10);
+ }
// Update total number of arguments.
__ Add(argc, argc, len);
__ CopyDoubleWords(dst, args_fp, len);
@@ -2547,13 +2643,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(x1);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that function is not a "classConstructor".
Label class_constructor;
__ LoadTaggedPointerField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
@@ -2568,13 +2662,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3,
SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask,
&done_convert);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2625,7 +2720,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Bind(&done_convert);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2649,7 +2744,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2666,7 +2761,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x2 : the [[BoundArguments]] (implemented as FixedArray)
// -- x3 : new.target (only in case of [[Construct]])
@@ -2698,6 +2793,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
Register receiver = x14;
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(argc, argc, kJSArgcReceiverSlots);
+ }
__ Add(total_argc, argc, bound_argc);
__ Peek(receiver, 0);
@@ -2766,7 +2864,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbnz(counter, &loop);
}
// Update argc.
- __ Mov(argc, total_argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Add(argc, total_argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(argc, total_argc);
+ }
}
__ Bind(&no_bound_arguments);
}
@@ -2776,7 +2878,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
@@ -2799,38 +2901,52 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(x1, &non_callable);
- __ Bind(&non_smi);
- __ LoadMap(x4, x1);
- __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = x0;
+ Register target = x1;
+ Register map = x4;
+ Register instance_type = x5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, ls);
- __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, Map::Bits1::IsCallableBit::kMask,
- &non_callable);
+ {
+ Register flags = x4;
+ __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
+ &non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ Cmp(x5, JS_PROXY_TYPE);
+ __ Cmp(instance_type, JS_PROXY_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
+ __ B(eq, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, __ ReceiverOperand(x0));
+ __ Poke(target, __ ReceiverOperand(argc));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(x1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2839,16 +2955,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ PushArgument(x1);
+ __ PushArgument(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
__ Unreachable();
}
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ PushArgument(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Unreachable();
+ }
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (checked to be a JSFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2879,7 +3004,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2908,36 +3033,46 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (can be any Object)
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = x0;
+ Register target = x1;
+ Register map = x4;
+ Register instance_type = x5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(x1, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, Map::Bits1::IsConstructorBit::kMask,
- &non_constructor);
+ __ LoadTaggedPointerField(map,
+ FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = x2;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(flags, Map::Bits1::IsConstructorBit::kMask,
+ &non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ Cmp(x5, JS_PROXY_TYPE);
+ __ Cmp(instance_type, JS_PROXY_TYPE);
__ B(ne, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2946,10 +3081,11 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, __ ReceiverOperand(x0));
+ __ Poke(target, __ ReceiverOperand(argc));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(x1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -3250,12 +3386,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
{
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
UseScratchRegisterScope temps(masm);
@@ -3677,8 +3807,11 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register js_getter = x4;
__ LoadTaggedPointerField(
js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ Ldr(api_function_address,
- FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
+
+ __ LoadExternalPointerField(
+ api_function_address,
+ FieldMemOperand(js_getter, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
@@ -4032,7 +4165,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ B(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4045,16 +4178,16 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, x3);
+ }
// Load the feedback vector.
Register feedback_vector = x2;
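
The copy loops that the arm and arm64 hunks replace, and the Generate_AllocateSpaceAndShiftExistingArguments helper the arm file introduces, all perform the same stack shuffle: grow the stack by count slots, slide the existing argument slots down into the new space, and hand back a pointer to the first freed slot. A rough stand-alone model in plain C++, with hypothetical names and the stack modelled as a vector whose index 0 is the stack top:

#include <cstddef>
#include <cstdint>
#include <vector>

struct ShiftResult {
  size_t first_free_slot;  // index where the `count` new arguments go
  size_t new_argc;         // updated total, as argc_in_out after the call
};

// Sketch only: `existing` is the number of stack slots to preserve; with
// kJSArgcIncludesReceiver this is exactly the value in the argc register.
ShiftResult AllocateSpaceAndShiftExistingArguments(
    std::vector<uintptr_t>& stack, size_t existing, size_t count) {
  stack.insert(stack.begin(), count, 0);   // models AllocateStackSpace(count)
  for (size_t i = 0; i < existing; ++i) {  // models the ldr/str post-index loop
    stack[i] = stack[i + count];
  }
  return {existing, existing + count};
}

The ge/gt and lt/le branch tweaks in the hunks above only adjust how many slots count as "existing" now that the receiver slot may already be part of argc.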
diff --git a/chromium/v8/src/builtins/array-concat.tq b/chromium/v8/src/builtins/array-concat.tq
index 5eb66e6ce87..6fad3e66833 100644
--- a/chromium/v8/src/builtins/array-concat.tq
+++ b/chromium/v8/src/builtins/array-concat.tq
@@ -43,7 +43,7 @@ ArrayPrototypeConcat(
// TODO(victorgomes): Implement slow path ArrayConcat in Torque.
tail ArrayConcat(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
} // namespace array
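
The arguments.length to arguments.actual_count substitution here (and again in array-shift.tq further down) reads as the Torque-side face of the same convention change: the value forwarded to the C++ builtin must be the raw count recorded in the frame, which no longer matches the JS-visible parameter count once the receiver slot is included. A hedged sketch of the presumed relation, with hypothetical names:

// Assumption, not a definition from this patch: the raw frame count is the
// JS-visible length plus one slot for the receiver when argc includes it.
inline int ActualCount(int js_visible_length, bool argc_includes_receiver) {
  return js_visible_length + (argc_includes_receiver ? 1 : 0);
}

If that reading is wrong in detail, the safe interpretation is simply that actual_count is whatever the frame actually recorded, while length stays the ECMAScript-level argument count.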
diff --git a/chromium/v8/src/builtins/array-filter.tq b/chromium/v8/src/builtins/array-filter.tq
index 1add88fa6a4..bd892a2e76c 100644
--- a/chromium/v8/src/builtins/array-filter.tq
+++ b/chromium/v8/src/builtins/array-filter.tq
@@ -97,7 +97,7 @@ transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
transitioning macro FastArrayFilter(implicit context: Context)(
fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny,
- output: FastJSArray) labels
+ output: FastJSArray): void labels
Bailout(Number, Number) {
let k: Smi = 0;
let to: Smi = 0;
diff --git a/chromium/v8/src/builtins/array-from.tq b/chromium/v8/src/builtins/array-from.tq
index e139e58de62..5fcdefccc37 100644
--- a/chromium/v8/src/builtins/array-from.tq
+++ b/chromium/v8/src/builtins/array-from.tq
@@ -79,7 +79,7 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
// memory, e.g. a proxy that discarded the values. Ignoring this case
// just means we would repeatedly call CreateDataProperty with index =
// 2^53
- assert(k < kMaxSafeInteger);
+ dcheck(k < kMaxSafeInteger);
// ii. Let Pk be ! ToString(k).
diff --git a/chromium/v8/src/builtins/array-join.tq b/chromium/v8/src/builtins/array-join.tq
index 6448c958752..12988af2a20 100644
--- a/chromium/v8/src/builtins/array-join.tq
+++ b/chromium/v8/src/builtins/array-join.tq
@@ -55,7 +55,7 @@ LoadJoinElement<array::FastDoubleElements>(
builtin LoadJoinTypedElement<T : type extends ElementsKind>(
context: Context, receiver: JSReceiver, k: uintptr): JSAny {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- assert(!IsDetachedBuffer(typedArray.buffer));
+ dcheck(!IsDetachedBuffer(typedArray.buffer));
return typed_array::LoadFixedTypedArrayElementAsTagged(
typedArray.data_ptr, k, typed_array::KindForArrayType<T>());
}
@@ -126,14 +126,14 @@ macro AddStringLength(implicit context: Context)(
macro StoreAndGrowFixedArray<T: type>(
fixedArray: FixedArray, index: intptr, element: T): FixedArray {
const length: intptr = fixedArray.length_intptr;
- assert(index <= length);
+ dcheck(index <= length);
if (index < length) {
fixedArray.objects[index] = element;
return fixedArray;
} else
deferred {
const newLength: intptr = CalculateNewElementsCapacity(length);
- assert(index < newLength);
+ dcheck(index < newLength);
const newfixedArray: FixedArray =
ExtractFixedArray(fixedArray, 0, length, newLength);
newfixedArray.objects[index] = element;
@@ -147,7 +147,7 @@ macro StoreAndGrowFixedArray<T: type>(
// Buffer.AddSeparators().
struct Buffer {
macro Add(implicit context: Context)(
- str: String, nofSeparators: intptr, separatorLength: intptr) {
+ str: String, nofSeparators: intptr, separatorLength: intptr): void {
// Add separators if necessary (at the beginning or more than one)
const writeSeparators: bool = this.index == 0 | nofSeparators > 1;
this.AddSeparators(nofSeparators, separatorLength, writeSeparators);
@@ -161,7 +161,7 @@ struct Buffer {
}
macro AddSeparators(implicit context: Context)(
- nofSeparators: intptr, separatorLength: intptr, write: bool) {
+ nofSeparators: intptr, separatorLength: intptr, write: bool): void {
if (nofSeparators == 0 || separatorLength == 0) return;
const nofSeparatorsInt: intptr = nofSeparators;
@@ -211,7 +211,7 @@ macro NewBuffer(len: uintptr, sep: String): Buffer {
const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
kMaxNewSpaceFixedArrayElements :
Signed(len);
- assert(cappedBufferSize > 0);
+ dcheck(cappedBufferSize > 0);
return Buffer{
fixedArray: AllocateZeroedFixedArray(cappedBufferSize),
index: 0,
@@ -222,7 +222,7 @@ macro NewBuffer(len: uintptr, sep: String): Buffer {
macro BufferJoin(implicit context: Context)(
buffer: Buffer, sep: String): String {
- assert(IsValidPositiveSmi(buffer.totalStringLength));
+ dcheck(IsValidPositiveSmi(buffer.totalStringLength));
if (buffer.totalStringLength == 0) return kEmptyString;
// Fast path when there's only one buffer element.
@@ -504,7 +504,8 @@ builtin JoinStackPop(implicit context: Context)(
}
// Fast path the common non-nested calls.
-macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver) {
+macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver):
+ void {
const stack: FixedArray = LoadJoinStack()
otherwise unreachable;
const len: intptr = stack.length_intptr;
diff --git a/chromium/v8/src/builtins/array-lastindexof.tq b/chromium/v8/src/builtins/array-lastindexof.tq
index fe416fa4a24..912b43abed1 100644
--- a/chromium/v8/src/builtins/array-lastindexof.tq
+++ b/chromium/v8/src/builtins/array-lastindexof.tq
@@ -44,7 +44,7 @@ macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
const same: Boolean = StrictEqual(searchElement, element);
if (same == True) {
- assert(Is<FastJSArray>(array));
+ dcheck(Is<FastJSArray>(array));
return k;
}
} label Hole {} // Do nothing for holes.
@@ -52,7 +52,7 @@ macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
--k;
}
- assert(Is<FastJSArray>(array));
+ dcheck(Is<FastJSArray>(array));
return -1;
}
@@ -90,7 +90,7 @@ macro TryFastArrayLastIndexOf(
return FastArrayLastIndexOf<FixedArray>(
context, array, fromSmi, searchElement);
}
- assert(IsDoubleElementsKind(kind));
+ dcheck(IsDoubleElementsKind(kind));
return FastArrayLastIndexOf<FixedDoubleArray>(
context, array, fromSmi, searchElement);
}
diff --git a/chromium/v8/src/builtins/array-map.tq b/chromium/v8/src/builtins/array-map.tq
index 48c8f876810..1958d1eb59d 100644
--- a/chromium/v8/src/builtins/array-map.tq
+++ b/chromium/v8/src/builtins/array-map.tq
@@ -97,13 +97,13 @@ transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
}
struct Vector {
- macro ReportSkippedElement() {
+ macro ReportSkippedElement(): void {
this.skippedElements = true;
}
macro CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
const length: Smi = this.fixedArray.length;
- assert(validLength <= length);
+ dcheck(validLength <= length);
let kind: ElementsKind = ElementsKind::PACKED_SMI_ELEMENTS;
if (!this.onlySmis) {
if (this.onlyNumbers) {
@@ -153,7 +153,8 @@ struct Vector {
return a;
}
- macro StoreResult(implicit context: Context)(index: Smi, result: JSAny) {
+ macro StoreResult(implicit context: Context)(
+ index: Smi, result: JSAny): void {
typeswitch (result) {
case (s: Smi): {
this.fixedArray.objects[index] = s;
diff --git a/chromium/v8/src/builtins/array-reverse.tq b/chromium/v8/src/builtins/array-reverse.tq
index b154483d062..69a678a5131 100644
--- a/chromium/v8/src/builtins/array-reverse.tq
+++ b/chromium/v8/src/builtins/array-reverse.tq
@@ -27,23 +27,24 @@ LoadElement<array::FastPackedDoubleElements, float64>(
}
macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
- implicit context: Context)(elements: FixedArrayBase, index: Smi, value: T);
+ implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: T): void;
StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: Smi) {
+ elements: FixedArrayBase, index: Smi, value: Smi): void {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElement(elems, index, value);
}
StoreElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: JSAny) {
+ elements: FixedArrayBase, index: Smi, value: JSAny): void {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
elements.objects[index] = value;
}
StoreElement<array::FastPackedDoubleElements, float64>(
implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: float64) {
+ elements: FixedArrayBase, index: Smi, value: float64): void {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
StoreFixedDoubleArrayElement(elems, index, value);
}
@@ -52,7 +53,7 @@ StoreElement<array::FastPackedDoubleElements, float64>(
// whether a property is present, so we can simply swap them using fast
// FixedArray loads/stores.
macro FastPackedArrayReverse<Accessor: type, T: type>(
- implicit context: Context)(elements: FixedArrayBase, length: Smi) {
+ implicit context: Context)(elements: FixedArrayBase, length: Smi): void {
let lower: Smi = 0;
let upper: Smi = length - 1;
@@ -138,8 +139,8 @@ transitioning macro GenericArrayReverse(
return object;
}
-macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny)
- labels Slow {
+macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny):
+ void labels Slow {
const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
const kind: ElementsKind = array.map.elements_kind;
diff --git a/chromium/v8/src/builtins/array-shift.tq b/chromium/v8/src/builtins/array-shift.tq
index ed1087a85af..ea62b1c7a86 100644
--- a/chromium/v8/src/builtins/array-shift.tq
+++ b/chromium/v8/src/builtins/array-shift.tq
@@ -103,7 +103,7 @@ transitioning javascript builtin ArrayPrototypeShift(
} label Runtime {
tail ArrayShift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/chromium/v8/src/builtins/array-slice.tq b/chromium/v8/src/builtins/array-slice.tq
index 435431f49d3..f5a644ef40e 100644
--- a/chromium/v8/src/builtins/array-slice.tq
+++ b/chromium/v8/src/builtins/array-slice.tq
@@ -89,7 +89,7 @@ macro HandleFastSlice(
labels Bailout {
const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
const count: Smi = Cast<Smi>(countNumber) otherwise Bailout;
- assert(start >= 0);
+ dcheck(start >= 0);
try {
typeswitch (o) {
@@ -130,17 +130,6 @@ macro HandleFastSlice(
transitioning javascript builtin
ArrayPrototypeSlice(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
- // Handle array cloning case if the receiver is a fast array.
- if (arguments.length == 0) {
- typeswitch (receiver) {
- case (a: FastJSArrayForCopy): {
- return CloneFastJSArray(context, a);
- }
- case (JSAny): {
- }
- }
- }
-
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -161,6 +150,30 @@ ArrayPrototypeSlice(
const end: JSAny = arguments[1];
const relativeEnd: Number = end == Undefined ? len : ToInteger_Inline(end);
+ // Handle array cloning case if the receiver is a fast array. In the case
+ // where relativeStart is 0 but start is not the SMI zero (e.g., start is an
+ // object whose valueOf returns 0) we must not call CloneFastJSArray. This is
+ // because CloneFastJSArray reloads the array length, and the ToInteger above
+ // might have called user code which changed it. Thus, calling
+ // CloneFastJSArray here is safe only if we know ToInteger didn't call user
+ // code.
+
+ // This logic should be in sync with ArrayPrototypeSlice (to a reasonable
+ // degree). This is because CloneFastJSArray produces arrays which are
+ // potentially COW. If there's a discrepancy, TF generates code which produces
+ // a COW array and then expects it to be non-COW (or the other way around) ->
+ // immediate deopt.
+ if ((start == Undefined || TaggedEqual(start, SmiConstant(0))) &&
+ end == Undefined) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ }
+ }
+ }
+
// 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
// else let final be min(relativeEnd, len).
const final: Number =
@@ -169,12 +182,12 @@ ArrayPrototypeSlice(
// 7. Let count be max(final - k, 0).
const count: Number = Max(final - k, 0);
- assert(0 <= k);
- assert(k <= len);
- assert(0 <= final);
- assert(final <= len);
- assert(0 <= count);
- assert(count <= len);
+ dcheck(0 <= k);
+ dcheck(k <= len);
+ dcheck(0 <= final);
+ dcheck(final <= len);
+ dcheck(0 <= count);
+ dcheck(count <= len);
try {
return HandleFastSlice(context, o, k, count)
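
The guard above is easiest to see from the JavaScript side. Below is a minimal TypeScript sketch of the hazard the start == Undefined || TaggedEqual(start, SmiConstant(0)) check avoids; the array, the evil start value, and the logged result are illustrative and not part of the patch:

  // Array.prototype.slice reads the length before ToInteger(start) can run
  // user code, so a clone taken afterwards could observe a different length.
  const arr = [1, 2, 3, 4];
  const evilStart = {
    valueOf() {
      arr.length = 1;  // user code shrinks the array during ToInteger(start)
      return 0;        // ...yet relativeStart still ends up as 0
    },
  };
  const sliced = arr.slice(evilStart as unknown as number);
  // Per the spec the result still has length 4 (len was captured before
  // valueOf ran), with only index 0 present. CloneFastJSArray would reload
  // the now-shortened length and return a 1-element array, which is why the
  // fast clone path is taken only when start is literally undefined or the
  // Smi 0 and end is undefined.
  console.log(sliced.length);  // 4
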
diff --git a/chromium/v8/src/builtins/array-unshift.tq b/chromium/v8/src/builtins/array-unshift.tq
index 7afeeb06271..69938ccaea2 100644
--- a/chromium/v8/src/builtins/array-unshift.tq
+++ b/chromium/v8/src/builtins/array-unshift.tq
@@ -89,7 +89,7 @@ transitioning javascript builtin ArrayPrototypeUnshift(
tail ArrayUnshift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
} label Slow {
return GenericArrayUnshift(context, receiver, arguments);
}
diff --git a/chromium/v8/src/builtins/array.tq b/chromium/v8/src/builtins/array.tq
index a9b4b1235b2..2ec4ab8f4e6 100644
--- a/chromium/v8/src/builtins/array.tq
+++ b/chromium/v8/src/builtins/array.tq
@@ -16,20 +16,21 @@ type FastSmiOrObjectElements extends ElementsKind;
type FastDoubleElements extends ElementsKind;
type DictionaryElements extends ElementsKind;
-macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray) {
- assert(IsFastElementsKind(array.map.elements_kind));
+macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray):
+ void {
+ dcheck(IsFastElementsKind(array.map.elements_kind));
const elements: FixedArrayBase = array.elements;
if (elements.map != kCOWMap) return;
// There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
// extract FixedArrays and don't have to worry about FixedDoubleArrays.
- assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
const length = Convert<intptr>(Cast<Smi>(array.length) otherwise unreachable);
array.elements =
ExtractFixedArray(UnsafeCast<FixedArray>(elements), 0, length, length);
- assert(array.elements.map != kCOWMap);
+ dcheck(array.elements.map != kCOWMap);
}
macro LoadElementOrUndefined(implicit context: Context)(
@@ -51,7 +52,7 @@ macro StoreArrayHole(elements: FixedArray, k: Smi): void {
elements.objects[k] = TheHole;
}
-extern macro SetPropertyLength(implicit context: Context)(JSAny, Number);
+extern macro SetPropertyLength(implicit context: Context)(JSAny, Number): void;
const kLengthDescriptorIndex:
constexpr int31 generates 'JSArray::kLengthDescriptorIndex';
@@ -72,7 +73,7 @@ macro EnsureArrayLengthWritable(implicit context: Context)(map: Map):
const descriptors: DescriptorArray = map.instance_descriptors;
const descriptor:&DescriptorEntry =
&descriptors.descriptors[kLengthDescriptorIndex];
- assert(TaggedEqual(descriptor->key, LengthStringConstant()));
+ dcheck(TaggedEqual(descriptor->key, LengthStringConstant()));
const details: Smi = UnsafeCast<Smi>(descriptor->details);
if ((details & kAttributesReadOnlyMask) != 0) {
goto Bailout;
diff --git a/chromium/v8/src/builtins/arraybuffer.tq b/chromium/v8/src/builtins/arraybuffer.tq
index fc0152f51ab..f033048abcf 100644
--- a/chromium/v8/src/builtins/arraybuffer.tq
+++ b/chromium/v8/src/builtins/arraybuffer.tq
@@ -47,7 +47,7 @@ transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength(
// 6. Else,
// a. Let length be O.[[ArrayBufferByteLength]].
// 7. Return F(length);
- assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ dcheck(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
return Convert<Number>(o.max_byte_length);
}
@@ -92,7 +92,7 @@ SharedArrayBufferPrototypeGetMaxByteLength(
// 5. Else,
// a. Let length be O.[[ArrayBufferByteLength]].
// 6. Return F(length);
- assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ dcheck(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
return Convert<Number>(o.max_byte_length);
}
diff --git a/chromium/v8/src/builtins/base.tq b/chromium/v8/src/builtins/base.tq
index af1813b61d7..7716d94288a 100644
--- a/chromium/v8/src/builtins/base.tq
+++ b/chromium/v8/src/builtins/base.tq
@@ -158,7 +158,7 @@ struct float64_or_hole {
return this.value;
}
macro ValueUnsafeAssumeNotHole(): float64 {
- assert(!this.is_hole);
+ dcheck(!this.is_hole);
return this.value;
}
@@ -307,9 +307,18 @@ extern enum ElementsKind extends int32 {
UINT8_CLAMPED_ELEMENTS,
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
+ RAB_GSAB_UINT8_ELEMENTS,
+ // TODO(torque): Allow duplicate enum values.
+ // FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ // FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
...
}
+const kFirstFixedTypedArrayElementsKind: constexpr ElementsKind =
+ ElementsKind::UINT8_ELEMENTS;
+const kFirstRabGsabFixedTypedArrayElementsKind: constexpr ElementsKind =
+ ElementsKind::RAB_GSAB_UINT8_ELEMENTS;
+
extern enum AllocationFlag extends int32
constexpr 'CodeStubAssembler::AllocationFlag' {
kNone,
@@ -554,7 +563,6 @@ extern class Filler extends HeapObject generates 'TNode<HeapObject>';
// Like JSObject, but created from API function.
@apiExposedInstanceTypeValue(0x422)
@doNotGenerateCast
-@noVerifier
extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
// TODO(gsathya): This only exists to make JSApiObject instance type into a
@@ -562,7 +570,6 @@ extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
@apiExposedInstanceTypeValue(0x80A)
@doNotGenerateCast
@highestInstanceTypeWithinParentClassRange
-@noVerifier
extern class JSLastDummyApiObject extends JSApiObject
generates 'TNode<JSObject>';
@@ -578,11 +585,11 @@ extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
-extern macro Print(constexpr string);
-extern macro Print(constexpr string, Object);
-extern macro Comment(constexpr string);
-extern macro Print(Object);
-extern macro DebugBreak();
+extern macro Print(constexpr string): void;
+extern macro Print(constexpr string, Object): void;
+extern macro Comment(constexpr string): void;
+extern macro Print(Object): void;
+extern macro DebugBreak(): void;
// ES6 7.1.4 ToInteger ( argument )
transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
@@ -601,7 +608,7 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
// ToInteger normalizes -0 to +0.
if (value == 0.0) return SmiConstant(0);
const result = ChangeFloat64ToTagged(value);
- assert(IsNumberNormalized(result));
+ dcheck(IsNumberNormalized(result));
return result;
}
case (a: JSAnyNotNumber): {
@@ -741,9 +748,9 @@ transitioning macro ToPrimitiveDefault(implicit context: Context)(v: JSAny):
}
}
-extern transitioning runtime NormalizeElements(Context, JSObject);
+extern transitioning runtime NormalizeElements(Context, JSObject): void;
extern transitioning runtime TransitionElementsKindWithKind(
- Context, JSObject, Smi);
+ Context, JSObject, Smi): void;
extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
@@ -806,6 +813,8 @@ extern macro IsElementsKindLessThanOrEqual(
ElementsKind, constexpr ElementsKind): bool;
extern macro IsElementsKindGreaterThan(
ElementsKind, constexpr ElementsKind): bool;
+extern macro IsElementsKindGreaterThanOrEqual(
+ ElementsKind, constexpr ElementsKind): bool;
extern macro IsElementsKindInRange(
ElementsKind, constexpr ElementsKind, constexpr ElementsKind): bool;
@@ -1228,7 +1237,7 @@ extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
extern operator '.length' macro LoadFastJSArrayLength(FastJSArray): Smi;
operator '.length=' macro StoreFastJSArrayLength(
- array: FastJSArray, length: Smi) {
+ array: FastJSArray, length: Smi): void {
const array: JSArray = array;
array.length = length;
}
@@ -1252,7 +1261,7 @@ macro FastHoleyElementsKind(kind: ElementsKind): ElementsKind {
} else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
return ElementsKind::HOLEY_DOUBLE_ELEMENTS;
}
- assert(kind == ElementsKind::PACKED_ELEMENTS);
+ dcheck(kind == ElementsKind::PACKED_ELEMENTS);
return ElementsKind::HOLEY_ELEMENTS;
}
@@ -1362,7 +1371,7 @@ macro NumberIsNaN(number: Number): bool {
}
}
-extern macro GotoIfForceSlowPath() labels Taken;
+extern macro GotoIfForceSlowPath(): void labels Taken;
macro IsForceSlowPath(): bool {
GotoIfForceSlowPath() otherwise return true;
return false;
@@ -1394,10 +1403,10 @@ macro SameValue(a: JSAny, b: JSAny): bool {
// Does "if (index1 + index2 > limit) goto IfOverflow" in an uintptr overflow
// friendly way where index1 and index2 are in [0, kMaxSafeInteger] range.
macro CheckIntegerIndexAdditionOverflow(
- index1: uintptr, index2: uintptr, limit: uintptr) labels IfOverflow {
+ index1: uintptr, index2: uintptr, limit: uintptr): void labels IfOverflow {
if constexpr (Is64()) {
- assert(index1 <= kMaxSafeIntegerUint64);
- assert(index2 <= kMaxSafeIntegerUint64);
+ dcheck(index1 <= kMaxSafeIntegerUint64);
+ dcheck(index2 <= kMaxSafeIntegerUint64);
// Given that both index1 and index2 are in a safe integer range the
// addition can't overflow.
if (index1 + index2 > limit) goto IfOverflow;
@@ -1431,7 +1440,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
if (kMode == kModeValueIsAnyNumber) {
if (valueSmi < 0) goto IfLessThanZero;
} else {
- assert(valueSmi >= 0);
+ dcheck(valueSmi >= 0);
}
const value: uintptr = Unsigned(Convert<intptr>(valueSmi));
// Positive Smi values definitely fit into both [0, kMaxSafeInteger] and
@@ -1439,14 +1448,14 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
return value;
}
case (valueHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(valueHeapNumber));
+ dcheck(IsNumberNormalized(valueHeapNumber));
const valueDouble: float64 = Convert<float64>(valueHeapNumber);
// NaNs must be handled outside.
- assert(!Float64IsNaN(valueDouble));
+ dcheck(!Float64IsNaN(valueDouble));
if (kMode == kModeValueIsAnyNumber) {
if (valueDouble < 0) goto IfLessThanZero;
} else {
- assert(valueDouble >= 0);
+ dcheck(valueDouble >= 0);
}
if constexpr (Is64()) {
@@ -1455,7 +1464,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
if (kMode == kModeValueIsAnyNumber) {
if (valueDouble > kMaxSafeInteger) goto IfSafeIntegerOverflow;
} else {
- assert(valueDouble <= kMaxSafeInteger);
+ dcheck(valueDouble <= kMaxSafeInteger);
}
} else {
// On 32-bit architectures uintptr range is smaller than safe integer
@@ -1464,7 +1473,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
kMode == kModeValueIsSafeInteger) {
if (valueDouble > kMaxUInt32Double) goto IfUIntPtrOverflow;
} else {
- assert(valueDouble <= kMaxUInt32Double);
+ dcheck(valueDouble <= kMaxUInt32Double);
}
}
return ChangeFloat64ToUintPtr(valueDouble);
@@ -1602,13 +1611,13 @@ macro ConvertToRelativeIndex(indexNumber: Number, length: uintptr): uintptr {
}
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
const indexDouble: float64 = Convert<float64>(indexHeapNumber);
// NaNs must already be handled by ConvertToRelativeIndex() version
// above accepting JSAny indices.
- assert(!Float64IsNaN(indexDouble));
+ dcheck(!Float64IsNaN(indexDouble));
const lengthDouble: float64 = Convert<float64>(length);
- assert(lengthDouble <= kMaxSafeInteger);
+ dcheck(lengthDouble <= kMaxSafeInteger);
if (indexDouble < 0) {
const relativeIndex: float64 = lengthDouble + indexDouble;
return relativeIndex > 0 ? ChangeFloat64ToUintPtr(relativeIndex) : 0;
@@ -1643,15 +1652,15 @@ macro ClampToIndexRange(indexNumber: Number, limit: uintptr): uintptr {
return index;
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
const indexDouble: float64 = Convert<float64>(indexHeapNumber);
// NaNs must already be handled by ClampToIndexRange() version
// above accepting JSAny indices.
- assert(!Float64IsNaN(indexDouble));
+ dcheck(!Float64IsNaN(indexDouble));
if (indexDouble <= 0) return 0;
const maxIndexDouble: float64 = Convert<float64>(limit);
- assert(maxIndexDouble <= kMaxSafeInteger);
+ dcheck(maxIndexDouble <= kMaxSafeInteger);
if (indexDouble >= maxIndexDouble) return limit;
return ChangeFloat64ToUintPtr(indexDouble);
@@ -1713,10 +1722,10 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object):
}
extern transitioning runtime
-CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny);
+CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny): void;
extern transitioning runtime SetOwnPropertyIgnoreAttributes(
- implicit context: Context)(JSObject, String, JSAny, Smi);
+ implicit context: Context)(JSObject, String, JSAny, Smi): void;
namespace runtime {
extern runtime
@@ -1746,7 +1755,7 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
BuildAppendJSArray(ElementsKind::HOLEY_DOUBLE_ELEMENTS, array, value)
otherwise Slow;
} else {
- assert(IsFastSmiOrTaggedElementsKind(kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(kind));
BuildAppendJSArray(ElementsKind::HOLEY_ELEMENTS, array, value)
otherwise Slow;
}
@@ -1767,7 +1776,7 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
otherwise unreachable;
doubleElements[index] = numberValue;
} else {
- assert(IsFastSmiOrTaggedElementsKind(kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(kind));
const elements = Cast<FixedArray>(array.elements) otherwise unreachable;
elements[index] = value;
}
diff --git a/chromium/v8/src/builtins/builtins-array-gen.cc b/chromium/v8/src/builtins/builtins-array-gen.cc
index 48eb954f83b..e5a3d446860 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-array-gen.cc
@@ -36,7 +36,7 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
context(), method_name, original_array, len());
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
- CSA_ASSERT(this, UintPtrLessThanOrEqual(len(), LoadJSTypedArrayLength(a)));
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(len(), LoadJSTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
a_ = a;
@@ -45,13 +45,13 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
TNode<Object> k_value, TNode<UintPtrT> k) {
- // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
+ // 7c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
TNode<Number> k_number = ChangeUintPtrToTagged(k);
TNode<Object> mapped_value =
Call(context(), callbackfn(), this_arg(), k_value, k_number, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
- // 8. d. Perform ? Set(A, Pk, mapped_value, true).
+ // 7d. Perform ? Set(A, Pk, mapped_value, true).
// Since we know that A is a TypedArray, this always ends up in
// #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
// tc39.github.io/ecma262/#sec-integerindexedelementset .
@@ -59,9 +59,9 @@ TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
BIND(&fast);
// #sec-integerindexedelementset
- // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // 2. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
// numValue be ? ToBigInt(v).
- // 6. Otherwise, let numValue be ? ToNumber(value).
+ // 3. Otherwise, let numValue be ? ToNumber(value).
TNode<Object> num_value;
if (source_elements_kind_ == BIGINT64_ELEMENTS ||
source_elements_kind_ == BIGUINT64_ELEMENTS) {
@@ -175,24 +175,16 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
size_t i = 0;
for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
BIND(&*it);
- Label done(this);
source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
- // TODO(turbofan): Silently cancelling the loop on buffer detachment is a
- // spec violation. Should go to &throw_detached and throw a TypeError
- // instead.
- VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
- typed_array);
- Goto(&done);
- // No exception, return success
- BIND(&done);
+ VisitAllTypedArrayElements(array_buffer, processor, direction, typed_array);
ReturnFromBuiltin(a_.value());
}
}
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
- TNode<JSTypedArray> typed_array) {
+ ForEachDirection direction, TNode<JSTypedArray> typed_array) {
+ // TODO(v8:11111): Support RAB / GSAB.
VariableList list({&a_, &k_}, zone());
TNode<UintPtrT> start = UintPtrConstant(0);
@@ -208,12 +200,28 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
BuildFastLoop<UintPtrT>(
list, start, end,
[&](TNode<UintPtrT> index) {
- GotoIf(IsDetachedBuffer(array_buffer), detached);
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
- TNode<Numeric> value = LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, source_elements_kind_);
- k_ = index;
- a_ = processor(this, value, index);
+ TVARIABLE(Object, value);
+ Label detached(this, Label::kDeferred);
+ Label process(this);
+ GotoIf(IsDetachedBuffer(array_buffer), &detached);
+ {
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ value = LoadFixedTypedArrayElementAsTagged(data_ptr, index,
+ source_elements_kind_);
+ Goto(&process);
+ }
+
+ BIND(&detached);
+ {
+ value = UndefinedConstant();
+ Goto(&process);
+ }
+
+ BIND(&process);
+ {
+ k_ = index;
+ a_ = processor(this, value.value(), index);
+ }
},
incr, advance_mode);
}
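
The intent of the new detached branch is easier to see from JavaScript. A minimal TypeScript sketch follows; it assumes a host where transferring an ArrayBuffer via structuredClone detaches it (browsers, recent Node), and the buffer and values are illustrative:

  const buf = new ArrayBuffer(4);
  const ta = new Uint8Array(buf);
  ta.set([1, 2, 3, 4]);

  const seen: Array<[number | undefined, number]> = [];
  ta.forEach((value, index) => {
    seen.push([value, index]);
    if (index === 1) {
      // Detach ta's buffer mid-iteration by transferring it away.
      structuredClone(buf, { transfer: [buf] });
    }
  });
  // Before this change the loop stopped silently after the detach (the spec
  // violation noted in the removed TODO); now the remaining indices are still
  // visited and read as undefined:
  //   seen == [[1, 0], [2, 1], [undefined, 2], [undefined, 3]]
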
@@ -221,7 +229,7 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -241,7 +249,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
TNode<JSArray> array_receiver = CAST(receiver);
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
TNode<IntPtrT> length =
LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
@@ -323,7 +331,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -362,8 +370,8 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- TNode<Int32T> kind = LoadElementsKind(array_receiver);
- GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
GotoIfNotNumber(arg, &object_push);
@@ -406,8 +414,8 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- TNode<Int32T> kind = LoadElementsKind(array_receiver);
- GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
Goto(&object_push);
}
@@ -442,7 +450,7 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
TNode<BInt> begin = SmiToBInt(Parameter<Smi>(Descriptor::kBegin));
TNode<BInt> count = SmiToBInt(Parameter<Smi>(Descriptor::kCount));
- CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
+ CSA_DCHECK(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
Return(ExtractFastJSArray(context, array, begin, count));
}
@@ -451,7 +459,7 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto array = Parameter<JSArray>(Descriptor::kSource);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
@@ -470,7 +478,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto array = Parameter<JSArray>(Descriptor::kSource);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
@@ -519,7 +527,7 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Number> length) {
TVARIABLE(Object, array);
Label is_constructor(this), is_not_constructor(this), done(this);
- CSA_ASSERT(this, IsNumberNormalized(length));
+ CSA_DCHECK(this, IsNumberNormalized(length));
GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
Branch(IsConstructor(CAST(receiver)), &is_constructor, &is_not_constructor);
@@ -612,7 +620,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
TNode<JSArray> array = CAST(receiver);
// JSArray length is always a positive Smi for fast arrays.
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
TNode<Smi> array_length = LoadFastJSArrayLength(array);
TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -621,9 +629,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
Label is_smi(this), is_nonsmi(this), done(this);
// If no fromIndex was passed, default to 0.
- GotoIf(
- IntPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(kFromIndexArg)),
- &done);
+ GotoIf(IntPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(kFromIndexArg)),
+ &done);
TNode<Object> start_from = args.AtIndex(kFromIndexArg);
// Handle Smis and undefined here and everything else in runtime.
@@ -799,16 +807,16 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
BIND(&not_nan_loop);
{
- Label continue_loop(this), not_smi(this);
+ Label continue_loop(this), element_k_not_smi(this);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
UnsafeLoadFixedArrayElement(elements, index_var.value());
- GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+ GotoIfNot(TaggedIsSmi(element_k), &element_k_not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))),
&return_found, &continue_loop);
- BIND(&not_smi);
+ BIND(&element_k_not_smi);
GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
Branch(Float64Equal(search_num.value(),
LoadHeapNumberValue(CAST(element_k))),
@@ -1200,7 +1208,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// Let index be O.[[ArrayIteratorNextIndex]].
TNode<Number> index = LoadJSArrayIteratorNextIndex(iterator);
- CSA_ASSERT(this, IsNumberNonNegativeSafeInteger(index));
+ CSA_DCHECK(this, IsNumberNonNegativeSafeInteger(index));
// Dispatch based on the type of the {array}.
TNode<Map> array_map = LoadMap(array);
@@ -1212,7 +1220,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_array);
{
// If {array} is a JSArray, then the {index} must be in Unsigned32 range.
- CSA_ASSERT(this, IsNumberArrayIndex(index));
+ CSA_DCHECK(this, IsNumberArrayIndex(index));
// Check that the {index} is within range for the {array}. We handle all
// kinds of JSArray's here, so we do the computation on Uint32.
@@ -1253,8 +1261,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_other);
{
// We cannot enter here with either JSArray's or JSTypedArray's.
- CSA_ASSERT(this, Word32BinaryNot(IsJSArray(array)));
- CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSTypedArray(array)));
// Check that the {index} is within the bounds of the {array}s "length".
TNode<Number> length = CAST(
@@ -1290,7 +1298,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
//
// Note specifically that JSTypedArray's will never take this path, so
// we don't need to worry about their maximum value.
- CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSTypedArray(array)));
TNode<Number> max_length =
SelectConstant(IsJSArray(array), NumberConstant(kMaxUInt32),
NumberConstant(kMaxSafeInteger));
@@ -1375,8 +1383,8 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
TNode<Number> start, TNode<Number> depth,
base::Optional<TNode<HeapObject>> mapper_function = base::nullopt,
base::Optional<TNode<Object>> this_arg = base::nullopt) {
- CSA_ASSERT(this, IsNumberPositive(source_length));
- CSA_ASSERT(this, IsNumberPositive(start));
+ CSA_DCHECK(this, IsNumberPositive(source_length));
+ CSA_DCHECK(this, IsNumberPositive(start));
// 1. Let targetIndex be start.
TVARIABLE(Number, var_target_index, start);
@@ -1397,7 +1405,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// a. Let P be ! ToString(sourceIndex).
// b. Let exists be ? HasProperty(source, P).
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
SmiGreaterThanOrEqual(CAST(source_index), SmiConstant(0)));
const TNode<Oddball> exists =
HasProperty(context, source, source_index, kHasProperty);
@@ -1412,7 +1420,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// ii. If mapperFunction is present, then
if (mapper_function) {
- CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function.value()),
+ CSA_DCHECK(this, Word32Or(IsUndefined(mapper_function.value()),
IsCallable(mapper_function.value())));
DCHECK(this_arg.has_value());
@@ -1438,7 +1446,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
BIND(&if_flatten_array);
{
- CSA_ASSERT(this, IsJSArray(element));
+ CSA_DCHECK(this, IsJSArray(element));
// 1. Let elementLen be ? ToLength(? Get(element, "length")).
const TNode<Object> element_length =
@@ -1455,7 +1463,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
BIND(&if_flatten_proxy);
{
- CSA_ASSERT(this, IsJSProxy(element));
+ CSA_DCHECK(this, IsJSProxy(element));
// 1. Let elementLen be ? ToLength(? Get(element, "length")).
const TNode<Number> element_length = ToLength_Inline(
@@ -1774,11 +1782,13 @@ void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
base::Optional<TNode<AllocationSite>> allocation_site) {
CodeStubArguments args(this, argc);
Label check_one_case(this), fallthrough(this);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(0)), &check_one_case);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(0)),
+ &check_one_case);
CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
BIND(&check_one_case);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(1)), &fallthrough);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &fallthrough);
CreateArrayDispatchSingleArgument(context, target, argc, mode,
allocation_site);
@@ -1793,11 +1803,11 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
Parameter<HeapObject>(Descriptor::kAllocationSite);
// Initial map for the builtin Array functions should be Map.
- CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
+ CSA_DCHECK(this, IsMap(CAST(LoadObjectField(
target, JSFunction::kPrototypeOrInitialMapOffset))));
// We should either have undefined or a valid AllocationSite
- CSA_ASSERT(this, Word32Or(IsUndefined(maybe_allocation_site),
+ CSA_DCHECK(this, Word32Or(IsUndefined(maybe_allocation_site),
IsAllocationSite(maybe_allocation_site)));
// "Enter" the context of the Array function.
diff --git a/chromium/v8/src/builtins/builtins-array-gen.h b/chromium/v8/src/builtins/builtins-array-gen.h
index 96833d9dea2..1f169632bf7 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.h
+++ b/chromium/v8/src/builtins/builtins-array-gen.h
@@ -104,7 +104,7 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
private:
void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer,
const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
+ ForEachDirection direction,
TNode<JSTypedArray> typed_array);
TNode<Object> callbackfn_;
diff --git a/chromium/v8/src/builtins/builtins-array.cc b/chromium/v8/src/builtins/builtins-array.cc
index 703be0198a6..1baba71926a 100644
--- a/chromium/v8/src/builtins/builtins-array.cc
+++ b/chromium/v8/src/builtins/builtins-array.cc
@@ -10,7 +10,7 @@
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
#include "src/execution/protectors-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/objects/contexts.h"
#include "src/objects/elements-inl.h"
@@ -1338,8 +1338,8 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
if (length == 0) break;
FixedDoubleArray elements =
FixedDoubleArray::cast(array.elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements.is_the_hole(i)) {
+ for (uint32_t k = 0; k < length; k++) {
+ if (elements.is_the_hole(k)) {
// TODO(jkummerow/verwaest): We could be a bit more clever
// here: Check if there are no elements/getters on the
// prototype chain, and if so, allow creation of a holey
@@ -1348,7 +1348,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
failure = true;
break;
}
- double double_value = elements.get_scalar(i);
+ double double_value = elements.get_scalar(k);
double_storage->set(j, double_value);
j++;
}
@@ -1358,8 +1358,8 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
case PACKED_SMI_ELEMENTS: {
Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
FixedArray elements(FixedArray::cast(array.elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object element = elements.get(i);
+ for (uint32_t k = 0; k < length; k++) {
+ Object element = elements.get(k);
if (element == the_hole) {
failure = true;
break;
diff --git a/chromium/v8/src/builtins/builtins-arraybuffer.cc b/chromium/v8/src/builtins/builtins-arraybuffer.cc
index f995299b7e2..ed0110ba2c6 100644
--- a/chromium/v8/src/builtins/builtins-arraybuffer.cc
+++ b/chromium/v8/src/builtins/builtins-arraybuffer.cc
@@ -191,8 +191,6 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [SAB] If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(is_shared, array_buffer, kMethodName);
- CHECK_RESIZABLE(false, array_buffer, kMethodName);
-
// * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -203,7 +201,7 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [AB] Let len be O.[[ArrayBufferByteLength]].
// * [SAB] Let len be O.[[ArrayBufferByteLength]].
- double const len = array_buffer->byte_length();
+ double const len = array_buffer->GetByteLength();
// * Let relativeStart be ? ToInteger(start).
Handle<Object> relative_start;
@@ -215,7 +213,6 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
double const first = (relative_start->Number() < 0)
? std::max(len + relative_start->Number(), 0.0)
: std::min(relative_start->Number(), len);
- Handle<Object> first_obj = isolate->factory()->NewNumber(first);
// * If end is undefined, let relativeEnd be len; else let relativeEnd be ?
// ToInteger(end).
@@ -279,6 +276,9 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
Handle<JSArrayBuffer> new_array_buffer = Handle<JSArrayBuffer>::cast(new_);
CHECK_SHARED(is_shared, new_array_buffer, kMethodName);
+ // The created ArrayBuffer might or might not be resizable, since the species
+ // constructor might return a non-resizable or a resizable buffer.
+
// * [AB] If IsDetachedBuffer(new) is true, throw a TypeError exception.
if (!is_shared && new_array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -302,7 +302,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
}
// * If new.[[ArrayBufferByteLength]] < newLen, throw a TypeError exception.
- if (new_array_buffer->byte_length() < new_len) {
+ size_t new_array_buffer_byte_length = new_array_buffer->GetByteLength();
+ if (new_array_buffer_byte_length < new_len) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(is_shared ? MessageTemplate::kSharedArrayBufferTooShort
@@ -321,21 +322,35 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * Let fromBuf be O.[[ArrayBufferData]].
// * Let toBuf be new.[[ArrayBufferData]].
// * Perform CopyDataBlockBytes(toBuf, 0, fromBuf, first, newLen).
- size_t first_size = 0, new_len_size = 0;
- CHECK(TryNumberToSize(*first_obj, &first_size));
- CHECK(TryNumberToSize(*new_len_obj, &new_len_size));
- DCHECK(new_array_buffer->byte_length() >= new_len_size);
+ size_t first_size = first;
+ size_t new_len_size = new_len;
+ DCHECK(new_array_buffer_byte_length >= new_len_size);
if (new_len_size != 0) {
- size_t from_byte_length = array_buffer->byte_length();
- USE(from_byte_length);
+ size_t from_byte_length = array_buffer->GetByteLength();
+ if (V8_UNLIKELY(!is_shared && array_buffer->is_resizable())) {
+ // The above steps might have resized the underlying buffer. In that case,
+ // only copy the still-accessible portion of the underlying data.
+ if (first_size > from_byte_length) {
+ return *new_; // Nothing to copy.
+ }
+ if (new_len_size > from_byte_length - first_size) {
+ new_len_size = from_byte_length - first_size;
+ }
+ }
DCHECK(first_size <= from_byte_length);
DCHECK(from_byte_length - first_size >= new_len_size);
uint8_t* from_data =
- reinterpret_cast<uint8_t*>(array_buffer->backing_store());
+ reinterpret_cast<uint8_t*>(array_buffer->backing_store()) + first_size;
uint8_t* to_data =
reinterpret_cast<uint8_t*>(new_array_buffer->backing_store());
- CopyBytes(to_data, from_data + first_size, new_len_size);
+ if (is_shared) {
+ base::Relaxed_Memcpy(reinterpret_cast<base::Atomic8*>(to_data),
+ reinterpret_cast<base::Atomic8*>(from_data),
+ new_len_size);
+ } else {
+ CopyBytes(to_data, from_data, new_len_size);
+ }
}
return *new_;
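
The clamping added above matters when user code shrinks a resizable source buffer between the length read and the byte copy; the species constructor is one place such code can run. A minimal TypeScript sketch, assuming an engine with resizable ArrayBuffer support (the cast through any is only there because older TypeScript libs do not know the resizable API yet; sizes and values are illustrative):

  const ArrayBufferAny = ArrayBuffer as any;
  const rab = new ArrayBufferAny(16, { maxByteLength: 16 });  // resizable
  new Uint8Array(rab).fill(0xab);

  // Species constructor that shrinks the source buffer mid-slice.
  rab.constructor = {
    [Symbol.species]: function (length: number) {
      rab.resize(4);
      return new ArrayBuffer(length);
    },
  };

  // slice(0, 16) wants 16 bytes, but only 4 remain accessible after the
  // resize; the builtin clamps the copy instead of reading past the live
  // data, so the result is a 16-byte buffer whose first 4 bytes were copied.
  const out: ArrayBuffer = rab.slice(0, 16);
  console.log(new Uint8Array(out).byteLength);  // 16
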
@@ -479,17 +494,7 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
array_buffer->GetBackingStore()->max_byte_length());
// 4. Let length be ArrayBufferByteLength(O, SeqCst).
- size_t byte_length;
- if (array_buffer->is_resizable()) {
- // Invariant: byte_length for GSAB is 0 (it needs to be read from the
- // BackingStore).
- DCHECK_EQ(0, array_buffer->byte_length());
-
- byte_length =
- array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
- } else {
- byte_length = array_buffer->byte_length();
- }
+ size_t byte_length = array_buffer->GetByteLength();
// 5. Return F(length).
return *isolate->factory()->NewNumberFromSize(byte_length);
}
diff --git a/chromium/v8/src/builtins/builtins-async-function-gen.cc b/chromium/v8/src/builtins/builtins-async-function-gen.cc
index 3e872526736..039f4ade69f 100644
--- a/chromium/v8/src/builtins/builtins-async-function-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-function-gen.cc
@@ -55,7 +55,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// unnecessary runtime checks removed.
// Ensure that the {async_function_object} is neither closed nor running.
- CSA_SLOW_ASSERT(
+ CSA_SLOW_DCHECK(
this, SmiGreaterThan(
LoadObjectField<Smi>(async_function_object,
JSGeneratorObject::kContinuationOffset),
@@ -84,9 +84,8 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Compute the number of registers and parameters.
TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
closure, JSFunction::kSharedFunctionInfoOffset);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField<Uint32T>(
@@ -98,7 +97,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Allocate and initialize the register file.
TNode<FixedArrayBase> parameters_and_registers =
AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length,
- kAllowLargeObjectAllocation);
+ AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
@@ -227,7 +226,7 @@ TF_BUILTIN(AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ CSA_DCHECK_JS_ARGC_EQ(this, 1);
const auto sentError = Parameter<Object>(Descriptor::kSentError);
const auto context = Parameter<Context>(Descriptor::kContext);
@@ -237,7 +236,7 @@ TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ CSA_DCHECK_JS_ARGC_EQ(this, 1);
const auto sentValue = Parameter<Object>(Descriptor::kSentValue);
const auto context = Parameter<Context>(Descriptor::kContext);
diff --git a/chromium/v8/src/builtins/builtins-async-gen.cc b/chromium/v8/src/builtins/builtins-async-gen.cc
index 4d821c82793..0adb95ad433 100644
--- a/chromium/v8/src/builtins/builtins-async-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-gen.cc
@@ -55,12 +55,12 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
+ CSA_DCHECK(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
const TNode<Map> promise_map = CAST(
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset));
// Assert that the JSPromise map has an instance size of
// JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrEqual(LoadMapInstanceSizeInWords(promise_map),
IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
kTaggedSize)));
@@ -259,7 +259,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
// Ensure that we don't have to initialize prototype_or_initial_map field of
// JSFunction.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
kTaggedSize)));
@@ -302,7 +302,7 @@ TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
TNode<NativeContext> native_context, TNode<Oddball> done) {
- CSA_ASSERT(this, IsBoolean(done));
+ CSA_DCHECK(this, IsBoolean(done));
TNode<Context> context = AllocateSyntheticFunctionContext(
native_context, ValueUnwrapContext::kLength);
@@ -317,7 +317,7 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
const TNode<Object> done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
- CSA_ASSERT(this, IsBoolean(CAST(done)));
+ CSA_DCHECK(this, IsBoolean(CAST(done)));
const TNode<Object> unwrapped_value =
CallBuiltin(Builtin::kCreateIterResultObject, context, value, done);
diff --git a/chromium/v8/src/builtins/builtins-async-generator-gen.cc b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
index 9d15ba0cfd0..87c1d443a6a 100644
--- a/chromium/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
@@ -65,18 +65,18 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
}
inline void SetGeneratorAwaiting(const TNode<JSGeneratorObject> generator) {
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
- CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ CSA_DCHECK(this, IsGeneratorAwaiting(generator));
}
inline void SetGeneratorNotAwaiting(
const TNode<JSGeneratorObject> generator) {
- CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ CSA_DCHECK(this, IsGeneratorAwaiting(generator));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
inline void CloseGenerator(const TNode<JSGeneratorObject> generator) {
@@ -216,7 +216,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
SetGeneratorNotAwaiting(generator);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
+ CSA_SLOW_DCHECK(this, IsGeneratorSuspended(generator));
// Remember the {resume_mode} for the {generator}.
StoreObjectFieldNoWriteBarrier(generator,
@@ -401,7 +401,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
Goto(&start);
BIND(&start);
- CSA_ASSERT(this, IsGeneratorNotExecuting(generator));
+ CSA_DCHECK(this, IsGeneratorNotExecuting(generator));
// Stop resuming if suspended for Await.
ReturnIf(IsGeneratorAwaiting(generator), UndefinedConstant());
@@ -478,7 +478,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
const auto done = Parameter<Object>(Descriptor::kDone);
const auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// This operation should be called only when the `value` parameter has been
// Await-ed. Typically, this means `value` is not a JSPromise value. However,
diff --git a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
index 11dd73cd4a2..cbae195060c 100644
--- a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -137,9 +137,9 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
{
Label has_sent_value(this), no_sent_value(this), merge(this);
ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
- Branch(
- IntPtrGreaterThan(args->GetLength(), IntPtrConstant(kValueOrReasonArg)),
- &has_sent_value, &no_sent_value);
+ Branch(IntPtrGreaterThan(args->GetLengthWithoutReceiver(),
+ IntPtrConstant(kValueOrReasonArg)),
+ &has_sent_value, &no_sent_value);
BIND(&has_sent_value);
{
iter_result = Call(context, method, sync_iterator, sent_value);
@@ -161,7 +161,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- CSA_ASSERT(this, IsConstructor(promise_fun));
+ CSA_DCHECK(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
@@ -228,16 +228,16 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
- const TNode<Object> done =
+ const TNode<Object> iter_result_done =
GetProperty(context, iter_result, factory()->done_string());
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
- const TNode<Object> value =
+ const TNode<Object> iter_result_value =
GetProperty(context, iter_result, factory()->value_string());
- var_value = value;
- var_done = done;
+ var_value = iter_result_value;
+ var_done = iter_result_done;
Goto(&merge);
}
diff --git a/chromium/v8/src/builtins/builtins-bigint.cc b/chromium/v8/src/builtins/builtins-bigint.cc
index 30da5207f90..2cb74aa3997 100644
--- a/chromium/v8/src/builtins/builtins-bigint.cc
+++ b/chromium/v8/src/builtins/builtins-bigint.cc
@@ -125,21 +125,21 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
BUILTIN(BigIntPrototypeToLocaleString) {
HandleScope scope(isolate);
- const char* method = "BigInt.prototype.toLocaleString";
+ const char* method_name = "BigInt.prototype.toLocaleString";
#ifdef V8_INTL_SUPPORT
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x, ThisBigIntValue(isolate, args.receiver(), method));
+ isolate, x, ThisBigIntValue(isolate, args.receiver(), method_name));
RETURN_RESULT_OR_FAILURE(
isolate,
Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2), method));
+ args.atOrUndefined(isolate, 2), method_name));
// Falls back to the old toString implementation if no V8_INTL_SUPPORT
#endif // V8_INTL_SUPPORT
Handle<Object> radix = isolate->factory()->undefined_value();
- return BigIntToStringImpl(args.receiver(), radix, isolate, method);
+ return BigIntToStringImpl(args.receiver(), radix, isolate, method_name);
}
BUILTIN(BigIntPrototypeToString) {
diff --git a/chromium/v8/src/builtins/builtins-bigint.tq b/chromium/v8/src/builtins/builtins-bigint.tq
index 067fb235deb..3cf46ef9bfb 100644
--- a/chromium/v8/src/builtins/builtins-bigint.tq
+++ b/chromium/v8/src/builtins/builtins-bigint.tq
@@ -70,9 +70,9 @@ macro MutableBigIntAbsoluteSub(implicit context: Context)(
const ylength = ReadBigIntLength(y);
const xsign = ReadBigIntSign(x);
- assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+ dcheck(MutableBigIntAbsoluteCompare(x, y) >= 0);
if (xlength == 0) {
- assert(ylength == 0);
+ dcheck(ylength == 0);
return x;
}
@@ -104,7 +104,7 @@ macro MutableBigIntAbsoluteAdd(implicit context: Context)(
// case: 0n + 0n
if (xlength == 0) {
- assert(ylength == 0);
+ dcheck(ylength == 0);
return x;
}
diff --git a/chromium/v8/src/builtins/builtins-call-gen.cc b/chromium/v8/src/builtins/builtins-call-gen.cc
index 54d2c748021..8b7b364375f 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.cc
+++ b/chromium/v8/src/builtins/builtins-call-gen.cc
@@ -274,12 +274,13 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_done);
{
Label if_not_double(this), if_double(this);
- TNode<Int32T> args_count = Int32Constant(0); // args already on the stack
+ TNode<Int32T> args_count =
+ Int32Constant(i::JSParameterCount(0)); // args already on the stack
TNode<Int32T> length = var_length.value();
{
Label normalize_done(this);
- CSA_ASSERT(this, Int32LessThanOrEqual(
+ CSA_DCHECK(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
GotoIfNot(Word32Equal(length, Int32Constant(0)), &normalize_done);
// Make sure we don't accidentally pass along the
@@ -326,14 +327,14 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
const ElementsKind new_kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- CSA_ASSERT(this, Int32LessThanOrEqual(length,
+ CSA_DCHECK(this, Int32LessThanOrEqual(length,
Int32Constant(FixedArray::kMaxLength)));
TNode<IntPtrT> intptr_length = ChangeInt32ToIntPtr(length);
- CSA_ASSERT(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
// Allocate a new FixedArray of Objects.
TNode<FixedArray> new_elements = CAST(AllocateFixedArray(
- new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation));
+ new_kind, intptr_length, AllocationFlag::kAllowLargeObjectAllocation));
// CopyFixedArrayElements does not distinguish between holey and packed for
// its first argument, so we don't need to dispatch on {kind} here.
CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
@@ -438,7 +439,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TNode<Int32T> length = LoadAndUntagToWord32ObjectField(
var_js_array.value(), JSArray::kLengthOffset);
TNode<FixedArrayBase> elements = var_elements.value();
- CSA_ASSERT(this, Int32LessThanOrEqual(
+ CSA_DCHECK(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
if (!new_target) {
@@ -737,8 +738,8 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
- TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
- call_data, holder);
+ TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback,
+ args.GetLengthWithoutReceiver(), call_data, holder);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
diff --git a/chromium/v8/src/builtins/builtins-collections-gen.cc b/chromium/v8/src/builtins/builtins-collections-gen.cc
index b44c70423ef..f4885efed8d 100644
--- a/chromium/v8/src/builtins/builtins-collections-gen.cc
+++ b/chromium/v8/src/builtins/builtins-collections-gen.cc
@@ -151,7 +151,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
compiler::ScopedExceptionHandler handler(this, if_exception, var_exception);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
TorqueStructKeyValuePair pair =
if_may_have_side_effects != nullptr
@@ -191,7 +191,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
TNode<JSArray> initial_entries_jsarray =
UncheckedCast<JSArray>(initial_entries);
#if DEBUG
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ CSA_DCHECK(this, IsFastJSArrayWithNoCustomIteration(
context, initial_entries_jsarray));
TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
#endif
@@ -215,7 +215,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
Unreachable();
BIND(&if_not_modified);
}
- CSA_ASSERT(this, TaggedEqual(original_initial_entries_map,
+ CSA_DCHECK(this, TaggedEqual(original_initial_entries_map,
LoadMap(initial_entries_jsarray)));
#endif
use_fast_loop = Int32FalseConstant();
@@ -238,13 +238,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadElementsKind(fast_jsarray);
TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(GetAddFunction(variant, native_context, collection),
add_func));
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
+ CSA_DCHECK(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
- CSA_ASSERT(
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+ CSA_DCHECK(
this, HasInitialCollectionPrototype(variant, native_context, collection));
#if DEBUG
@@ -277,7 +277,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// A Map constructor requires entries to be arrays (ex. [key, value]),
// so a FixedDoubleArray can never succeed.
if (variant == kMap || variant == kWeakMap) {
- CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
TNode<Object> element =
LoadAndNormalizeFixedDoubleArrayElement(elements, IntPtrConstant(0));
ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
@@ -296,9 +296,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
}
BIND(&exit);
#if DEBUG
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(original_collection_map, LoadMap(CAST(collection))));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
#endif
}
@@ -307,14 +307,14 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
Variant variant, TNode<Context> context, TNode<Context> native_context,
TNode<Object> collection, TNode<Object> iterable) {
Label exit(this), loop(this), if_exception(this, Label::kDeferred);
- CSA_ASSERT(this, Word32BinaryNot(IsNullOrUndefined(iterable)));
+ CSA_DCHECK(this, Word32BinaryNot(IsNullOrUndefined(iterable)));
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
TorqueStructIteratorRecord iterator =
iterator_assembler.GetIterator(context, iterable);
- CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
+ CSA_DCHECK(this, Word32BinaryNot(IsUndefined(iterator.object)));
TNode<Map> fast_iterator_result_map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
@@ -402,7 +402,7 @@ TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollection(
TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionFast(
TNode<JSFunction> constructor) {
- CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor)));
+ CSA_DCHECK(this, IsConstructorMap(LoadMap(constructor)));
TNode<Map> initial_map =
CAST(LoadJSFunctionPrototypeOrInitialMap(constructor));
return AllocateJSObjectFromMap(initial_map);
@@ -779,7 +779,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
not_found);
// Make sure the entry index is within range.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
UintPtrLessThan(
var_entry.value(),
@@ -1081,7 +1081,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
TNode<IntPtrT> index;
std::tie(table, index) =
TransitionAndUpdate<JSMapIterator, OrderedHashMap>(iterator);
- CSA_ASSERT(this, IntPtrEqual(index, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrEqual(index, IntPtrConstant(0)));
TNode<IntPtrT> size =
LoadAndUntagObjectField(table, OrderedHashMap::NumberOfElementsOffset());
@@ -1089,8 +1089,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -1128,7 +1129,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
BIND(&write_value);
{
- CSA_ASSERT(this, InstanceTypeEqual(LoadInstanceType(iterator),
+ CSA_DCHECK(this, InstanceTypeEqual(LoadInstanceType(iterator),
JS_MAP_VALUE_ITERATOR_TYPE));
TNode<Object> entry_value =
UnsafeLoadFixedArrayElement(table, entry_start_position,
@@ -1187,7 +1188,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
TNode<IntPtrT> iter_index;
std::tie(iter_table, iter_index) =
TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(iterable));
- CSA_ASSERT(this, IntPtrEqual(iter_index, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrEqual(iter_index, IntPtrConstant(0)));
var_table = iter_table;
Goto(&copy);
}
@@ -1200,8 +1201,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -1272,7 +1274,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
const TNode<IntPtrT> key_untagged = SmiUntag(smi_key);
const TNode<IntPtrT> hash =
ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1287,7 +1289,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
TNode<CollectionType> table, TNode<String> key_tagged,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = ComputeStringHash(key_tagged);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1302,7 +1304,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
TNode<CollectionType> table, TNode<HeapNumber> key_heap_number,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = CallGetHashRaw(key_heap_number);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
const TNode<Float64T> key_float = LoadHeapNumberValue(key_heap_number);
FindOrderedHashTableEntry<CollectionType>(
@@ -1318,7 +1320,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
TNode<CollectionType> table, TNode<BigInt> key_big_int,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = CallGetHashRaw(key_big_int);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1333,7 +1335,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
TNode<CollectionType> table, TNode<HeapObject> key_heap_object,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = GetHash(key_heap_object);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1481,17 +1483,17 @@ CollectionsBuiltinsAssembler::Transition(
Goto(&loop);
BIND(&loop);
{
- TNode<TableType> table = var_table.value();
- TNode<IntPtrT> index = var_index.value();
+ TNode<TableType> current_table = var_table.value();
+ TNode<IntPtrT> current_index = var_index.value();
TNode<Object> next_table =
- LoadObjectField(table, TableType::NextTableOffset());
+ LoadObjectField(current_table, TableType::NextTableOffset());
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table = CAST(next_table);
- var_index = SmiUntag(
- CAST(CallBuiltin(Builtin::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index))));
+ var_index = SmiUntag(CAST(CallBuiltin(Builtin::kOrderedHashTableHealIndex,
+ NoContextConstant(), current_table,
+ SmiTag(current_index))));
Goto(&loop);
}
BIND(&done_loop);
@@ -2496,14 +2498,14 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
Variant variant, TNode<IntPtrT> at_least_space_for) {
// See HashTable::New().
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
// See HashTable::NewInternal().
TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
- TNode<FixedArray> table = CAST(
- AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> table = CAST(AllocateFixedArray(
+ HOLEY_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
TNode<Map> map =
HeapConstant(EphemeronHashTable::GetMap(ReadOnlyRoots(isolate())));
@@ -2814,7 +2816,7 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
auto key = Parameter<JSReceiver>(Descriptor::kKey);
auto value = Parameter<Object>(Descriptor::kValue);
- CSA_ASSERT(this, IsJSReceiver(key));
+ CSA_DCHECK(this, IsJSReceiver(key));
Label call_runtime(this), if_no_hash(this), if_not_found(this);
diff --git a/chromium/v8/src/builtins/builtins-constructor-gen.cc b/chromium/v8/src/builtins/builtins-constructor-gen.cc
index 0d677da854f..28af8bfabc3 100644
--- a/chromium/v8/src/builtins/builtins-constructor-gen.cc
+++ b/chromium/v8/src/builtins/builtins-constructor-gen.cc
@@ -189,7 +189,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
- CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
+ CSA_DCHECK(this, IsManyClosuresCellMap(feedback_cell_map),
feedback_cell_map, feedback_cell);
Goto(&cell_done);
@@ -211,7 +211,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
const TNode<IntPtrT> function_map_index = Signed(IntPtrAdd(
DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX)));
- CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(
function_map_index,
IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
@@ -338,7 +338,7 @@ TNode<JSObject> ConstructorBuiltinsAssembler::FastNewObject(
BIND(&instantiate_map);
return AllocateJSObjectFromMap(initial_map, properties.value(), base::nullopt,
- kNone, kWithSlackTracking);
+ AllocationFlag::kNone, kWithSlackTracking);
}
TNode<Context> ConstructorBuiltinsAssembler::FastNewFunctionContext(
@@ -539,7 +539,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
TNode<JSObject> boilerplate = LoadBoilerplate(allocation_site);
TNode<Map> boilerplate_map = LoadMap(boilerplate);
- CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
+ CSA_DCHECK(this, IsJSObjectMap(boilerplate_map));
TVARIABLE(HeapObject, var_properties);
{
@@ -587,12 +587,9 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
Goto(&done);
BIND(&if_copy_elements);
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kAllFixedArrays;
- flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly;
- flags |= ExtractFixedArrayFlag::kDontCopyCOW;
+ auto flags = ExtractFixedArrayFlag::kAllFixedArrays;
var_elements = CloneFixedArray(boilerplate_elements, flags);
Goto(&done);
BIND(&done);
@@ -684,7 +681,7 @@ TNode<JSObject> ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(
TNode<Map> map = LoadObjectFunctionInitialMap(native_context);
// Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
LoadMapBitField3(map)));
TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
TNode<JSObject> result =
diff --git a/chromium/v8/src/builtins/builtins-dataview.cc b/chromium/v8/src/builtins/builtins-dataview.cc
index 3ae331f5d72..7bd277beafd 100644
--- a/chromium/v8/src/builtins/builtins-dataview.cc
+++ b/chromium/v8/src/builtins/builtins-dataview.cc
@@ -19,6 +19,7 @@ namespace internal {
// ES #sec-dataview-constructor
BUILTIN(DataViewConstructor) {
+ const char* const kMethodName = "DataView constructor";
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -33,29 +34,31 @@ BUILTIN(DataViewConstructor) {
Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
+ // 2. Perform ? RequireInternalSlot(buffer, [[ArrayBufferData]]).
if (!buffer->IsJSArrayBuffer()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
- // 4. Let offset be ? ToIndex(byteOffset).
+ // 3. Let offset be ? ToIndex(byteOffset).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, byte_offset,
Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
size_t view_byte_offset = byte_offset->Number();
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point. TODO: Fix that.
+ // 4. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
- // 6. Let bufferByteLength be the value of buffer's
- // [[ArrayBufferByteLength]] internal slot.
+ // 5. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
size_t const buffer_byte_length = array_buffer->byte_length();
- // 7. If offset > bufferByteLength, throw a RangeError exception.
+ // 6. If offset > bufferByteLength, throw a RangeError exception.
if (view_byte_offset > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset));
@@ -63,11 +66,11 @@ BUILTIN(DataViewConstructor) {
size_t view_byte_length;
if (byte_length->IsUndefined(isolate)) {
- // 8. If byteLength is either not present or undefined, then
+ // 7. If byteLength is undefined, then
// a. Let viewByteLength be bufferByteLength - offset.
view_byte_length = buffer_byte_length - view_byte_offset;
} else {
- // 9. Else,
+ // 8. Else,
// a. Let viewByteLength be ? ToIndex(byteLength).
// b. If offset+viewByteLength > bufferByteLength, throw a
// RangeError exception.
@@ -82,31 +85,45 @@ BUILTIN(DataViewConstructor) {
view_byte_length = byte_length->Number();
}
- // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]»).
+ // 9. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]»).
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<JSDataView> data_view = Handle<JSDataView>::cast(result);
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
// TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
- Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::zero());
+ data_view->SetEmbedderField(i, Smi::zero());
}
- // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+  // We have to set the internal slots before the detached check in step 10,
+  // or the TorqueGeneratedClassVerifier ends up complaining that the slot is
+  // empty or invalid on heap teardown.
+  // The result object is not observable from JavaScript when step 10 aborts
+  // early, so it is fine to set the internal slots here.
+
+ // 11. Set O.[[ViewedArrayBuffer]] to buffer.
+ data_view->set_buffer(*array_buffer);
- // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(view_byte_length);
+ // 12. Set O.[[ByteLength]] to viewByteLength.
+ data_view->set_byte_length(view_byte_length);
- // 13. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
- Handle<JSDataView>::cast(result)->AllocateExternalPointerEntries(isolate);
- Handle<JSDataView>::cast(result)->set_data_pointer(
+ // 13. Set O.[[ByteOffset]] to offset.
+ data_view->set_byte_offset(view_byte_offset);
+ data_view->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
+ // 10. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
// 14. Return O.
return *result;
}
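The reordering above is the substance of this hunk: the DataView's internal slots are now filled in before the detached-buffer re-check, so a buffer detached by a side effect of ToIndex or OrdinaryCreateFromConstructor still raises a TypeError, while the half-built object never becomes visible to script. A minimal stand-alone sketch of that shape, with SimpleBuffer and SimpleDataView as hypothetical stand-ins for the V8 types and the TypeError left to the caller:

#include <cstddef>
#include <cstdint>

struct SimpleBuffer {
  uint8_t* data = nullptr;
  size_t byte_length = 0;
  bool detached = false;
};

struct SimpleDataView {
  SimpleBuffer* buffer = nullptr;
  size_t byte_offset = 0;
  size_t byte_length = 0;
  uint8_t* data_pointer = nullptr;
};

// Mirrors the ordering above: slots first (steps 11-13), detached check last
// (step 10). The object has not been exposed to script at this point, so the
// swap is unobservable from JavaScript but keeps heap verifiers satisfied.
bool ConstructDataView(SimpleBuffer* buffer, size_t offset, size_t length,
                       SimpleDataView* out) {
  out->buffer = buffer;
  out->byte_offset = offset;
  out->byte_length = length;
  out->data_pointer = buffer->data + offset;
  if (buffer->detached) return false;  // caller raises the TypeError
  return true;
}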
diff --git a/chromium/v8/src/builtins/builtins-date.cc b/chromium/v8/src/builtins/builtins-date.cc
index 1de6357cf83..cb264279d50 100644
--- a/chromium/v8/src/builtins/builtins-date.cc
+++ b/chromium/v8/src/builtins/builtins-date.cc
@@ -24,85 +24,6 @@ namespace internal {
namespace {
-// ES6 section 20.3.1.1 Time Values and Time Range
-const double kMinYear = -1000000.0;
-const double kMaxYear = -kMinYear;
-const double kMinMonth = -10000000.0;
-const double kMaxMonth = -kMinMonth;
-
-// 20.3.1.2 Day Number and Time within Day
-const double kMsPerDay = 86400000.0;
-
-// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
-const double kMsPerSecond = 1000.0;
-const double kMsPerMinute = 60000.0;
-const double kMsPerHour = 3600000.0;
-
-// ES6 section 20.3.1.14 MakeDate (day, time)
-double MakeDate(double day, double time) {
- if (std::isfinite(day) && std::isfinite(time)) {
- return time + day * kMsPerDay;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.13 MakeDay (year, month, date)
-double MakeDay(double year, double month, double date) {
- if ((kMinYear <= year && year <= kMaxYear) &&
- (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
- int y = FastD2I(year);
- int m = FastD2I(month);
- y += m / 12;
- m %= 12;
- if (m < 0) {
- m += 12;
- y -= 1;
- }
- DCHECK_LE(0, m);
- DCHECK_LT(m, 12);
-
- // kYearDelta is an arbitrary number such that:
- // a) kYearDelta = -1 (mod 400)
- // b) year + kYearDelta > 0 for years in the range defined by
- // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int kYearDelta = 399999;
- static const int kBaseDay =
- 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
- (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
- int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
- (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
- kBaseDay;
- if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
- static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- day_from_year += kDayFromMonth[m];
- } else {
- static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
- day_from_year += kDayFromMonth[m];
- }
- return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
-double MakeTime(double hour, double min, double sec, double ms) {
- if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
- std::isfinite(ms)) {
- double const h = DoubleToInteger(hour);
- double const m = DoubleToInteger(min);
- double const s = DoubleToInteger(sec);
- double const milli = DoubleToInteger(ms);
- return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
"Thu", "Fri", "Sat"};
const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
@@ -394,31 +315,33 @@ BUILTIN(DatePrototypeSetFullYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double y = year->Number(), m = 0.0, dt = 1.0;
+ double year_double = year->Number(), month_double = 0.0, day_double = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value().Number())) {
int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- m = month->Number();
+ month_double = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
- Object::ToNumber(isolate, date));
- dt = date->Number();
+ Handle<Object> day = args.at(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, day,
+ Object::ToNumber(isolate, day));
+ day_double = day->Number();
}
}
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ double time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return SetLocalDateValue(isolate, date, time_val);
}
@@ -613,30 +536,32 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double y = year->Number(), m = 0.0, dt = 1.0;
+ double year_double = year->Number(), month_double = 0.0, day_double = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value().Number())) {
int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int const days = isolate->date_cache()->DaysFromTime(time_ms);
time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- m = month->Number();
+ month_double = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
- Object::ToNumber(isolate, date));
- dt = date->Number();
+ Handle<Object> day = args.at(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, day,
+ Object::ToNumber(isolate, day));
+ day_double = day->Number();
}
}
- double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ double const time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return *JSDate::SetValue(date, DateCache::TimeClip(time_val));
}
@@ -854,8 +779,8 @@ BUILTIN(DatePrototypeToLocaleDateString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString);
- const char* method = "Date.prototype.toLocaleDateString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleDateString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -865,7 +790,7 @@ BUILTIN(DatePrototypeToLocaleDateString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kDate, // required
JSDateTimeFormat::DefaultsOption::kDate, // defaults
- method)); // method
+ method_name)); // method_name
}
// ecma402 #sup-date.prototype.tolocalestring
@@ -874,8 +799,8 @@ BUILTIN(DatePrototypeToLocaleString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString);
- const char* method = "Date.prototype.toLocaleString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -885,7 +810,7 @@ BUILTIN(DatePrototypeToLocaleString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kAny, // required
JSDateTimeFormat::DefaultsOption::kAll, // defaults
- method)); // method
+ method_name)); // method_name
}
// ecma402 #sup-date.prototype.tolocaletimestring
@@ -894,8 +819,8 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString);
- const char* method = "Date.prototype.toLocaleTimeString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleTimeString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -905,7 +830,7 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kTime, // required
JSDateTimeFormat::DefaultsOption::kTime, // defaults
- method)); // method
+ method_name)); // method_name
}
#endif // V8_INTL_SUPPORT
@@ -951,11 +876,11 @@ BUILTIN(DatePrototypeSetYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double m = 0.0, dt = 1.0, y = year->Number();
- if (!std::isnan(y)) {
- double y_int = DoubleToInteger(y);
- if (0.0 <= y_int && y_int <= 99.0) {
- y = 1900.0 + y_int;
+ double month_double = 0.0, day_double = 1.0, year_double = year->Number();
+ if (!std::isnan(year_double)) {
+ double year_int = DoubleToInteger(year_double);
+ if (0.0 <= year_int && year_int <= 99.0) {
+ year_double = 1900.0 + year_int;
}
}
int time_within_day = 0;
@@ -964,12 +889,14 @@ BUILTIN(DatePrototypeSetYear) {
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
- }
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
+ }
+ double time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return SetLocalDateValue(isolate, date, time_val);
}
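The MakeDate/MakeDay/MakeTime helpers deleted at the top of this file are still called by the setters above, so they have presumably been relocated to a shared header in this update; the arithmetic itself is unchanged. For reference, a stand-alone sketch of the removed day/date math, reduced to finite inputs (DoubleToInteger approximated by std::trunc), with one worked value; the main() check is illustrative only:

#include <cmath>
#include <cstdio>

namespace {

constexpr double kMsPerDay = 86400000.0;

// ES2015 20.3.1.13 MakeDay (year, month, date), finite inputs only.
double MakeDay(double year, double month, double date) {
  int y = static_cast<int>(year);
  int m = static_cast<int>(month);
  y += m / 12;
  m %= 12;
  if (m < 0) {
    m += 12;
    y -= 1;
  }
  // Shift years so the integer divisions below never see negative operands
  // (same kYearDelta trick as the removed code).
  static const int kYearDelta = 399999;
  static const int kBaseDay =
      365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
      (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
  int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
                      (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
                      kBaseDay;
  static const int kDayFromMonthCommon[] = {0,   31,  59,  90,  120, 151,
                                            181, 212, 243, 273, 304, 334};
  static const int kDayFromMonthLeap[] = {0,   31,  60,  91,  121, 152,
                                          182, 213, 244, 274, 305, 335};
  const bool leap = (y % 4 == 0) && (y % 100 != 0 || y % 400 == 0);
  day_from_year += leap ? kDayFromMonthLeap[m] : kDayFromMonthCommon[m];
  return static_cast<double>(day_from_year - 1) + std::trunc(date);
}

// ES2015 20.3.1.14 MakeDate (day, time).
double MakeDate(double day, double time) { return time + day * kMsPerDay; }

}  // namespace

int main() {
  // 2000-01-01T00:00:00Z is 10957 days after the epoch, i.e. 946684800000 ms.
  std::printf("%.0f\n", MakeDate(MakeDay(2000, 0, 1), 0));  // 946684800000
  return 0;
}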
diff --git a/chromium/v8/src/builtins/builtins-definitions.h b/chromium/v8/src/builtins/builtins-definitions.h
index 70eb349dab9..f7b94c40593 100644
--- a/chromium/v8/src/builtins/builtins-definitions.h
+++ b/chromium/v8/src/builtins/builtins-definitions.h
@@ -31,33 +31,62 @@ namespace internal {
// TODO(jgruber): Remove DummyDescriptor once all ASM builtins have been
// properly associated with their descriptor.
-#define BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \
- /* GC write barrirer */ \
- TFC(RecordWriteEmitRememberedSetSaveFP, WriteBarrier) \
- TFC(RecordWriteOmitRememberedSetSaveFP, WriteBarrier) \
- TFC(RecordWriteEmitRememberedSetIgnoreFP, WriteBarrier) \
- TFC(RecordWriteOmitRememberedSetIgnoreFP, WriteBarrier) \
- TFC(EphemeronKeyBarrierSaveFP, WriteBarrier) \
- TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
- \
- /* TSAN support for stores in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore) \
- \
- /* TSAN support for loads in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad) \
- \
- /* Adaptor for CPP builtin */ \
- TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
+// Builtins are additionally split into tiers, where the tier determines the
+// distance of the builtins table from the root register within IsolateData.
+//
+// - Tier 0 (T0) are guaranteed to be close to the root register and can thus
+// be accessed efficiently root-relative calls (so not, e.g., calls from
+// generated code when short-builtin-calls is on).
+// - T1 builtins have no distance guarantees.
+//
+// Note that this mechanism works only if the set of T0 builtins is kept as
+// small as possible. Please resist the temptation to add your builtin here
+// unless there's a very good reason.
+#define BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ /* Deoptimization entries. */ \
+ ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
+ ASM(DynamicCheckMapsTrampoline, DynamicCheckMaps) \
+ ASM(DynamicCheckMapsWithFeedbackVectorTrampoline, \
+ DynamicCheckMapsWithFeedbackVector) \
+ \
+ /* GC write barrier. */ \
+ TFC(RecordWriteEmitRememberedSetSaveFP, WriteBarrier) \
+ TFC(RecordWriteOmitRememberedSetSaveFP, WriteBarrier) \
+ TFC(RecordWriteEmitRememberedSetIgnoreFP, WriteBarrier) \
+ TFC(RecordWriteOmitRememberedSetIgnoreFP, WriteBarrier) \
+ TFC(EphemeronKeyBarrierSaveFP, WriteBarrier) \
+ TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
+ \
+ /* Adaptor for CPP builtins. */ \
+ TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor)
+
+#define BUILTIN_LIST_BASE_TIER1(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ /* TSAN support for stores in generated code. */ \
+ IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore) \
+ \
+ /* TSAN support for loads in generated code. */ \
+ IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad) \
\
/* Calls */ \
/* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
@@ -179,10 +208,6 @@ namespace internal {
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
TFC(InstantiateAsmJs, JSTrampoline) \
ASM(NotifyDeoptimized, Dummy) \
- ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
- ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
- ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
- ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \
@@ -274,10 +299,7 @@ namespace internal {
TFH(HasIndexedInterceptorIC, LoadWithVector) \
\
/* Dynamic check maps */ \
- ASM(DynamicCheckMapsTrampoline, DynamicCheckMaps) \
TFC(DynamicCheckMaps, DynamicCheckMaps) \
- ASM(DynamicCheckMapsWithFeedbackVectorTrampoline, \
- DynamicCheckMapsWithFeedbackVector) \
TFC(DynamicCheckMapsWithFeedbackVector, DynamicCheckMapsWithFeedbackVector) \
\
/* Microtask helpers */ \
@@ -294,7 +316,7 @@ namespace internal {
\
/* Abort */ \
TFC(Abort, Abort) \
- TFC(AbortCSAAssert, Abort) \
+ TFC(AbortCSADcheck, Abort) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -302,7 +324,7 @@ namespace internal {
CPP(Illegal) \
CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
- TFJ(ReturnReceiver, 0, kReceiver) \
+ TFJ(ReturnReceiver, kJSArgcReceiverSlots, kReceiver) \
\
/* Array */ \
TFC(ArrayConstructor, JSTrampoline) \
@@ -373,13 +395,13 @@ namespace internal {
TFS(CloneFastJSArrayFillingHoles, kSource) \
TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
/* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0, kReceiver) \
+ TFJ(ArrayPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0, kReceiver) \
+ TFJ(ArrayPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0, kReceiver) \
+ TFJ(ArrayPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(ArrayIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
/* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
@@ -404,8 +426,10 @@ namespace internal {
TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \
TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
+ TFJ(AsyncFunctionAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentError) \
+ TFJ(AsyncFunctionAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentValue) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
@@ -471,45 +495,45 @@ namespace internal {
/* ES #sec-date-constructor */ \
CPP(DateConstructor) \
/* ES6 #sec-date.prototype.getdate */ \
- TFJ(DatePrototypeGetDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getday */ \
- TFJ(DatePrototypeGetDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getfullyear */ \
- TFJ(DatePrototypeGetFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gethours */ \
- TFJ(DatePrototypeGetHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmilliseconds */ \
- TFJ(DatePrototypeGetMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getminutes */ \
- TFJ(DatePrototypeGetMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmonth */ \
- TFJ(DatePrototypeGetMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getseconds */ \
- TFJ(DatePrototypeGetSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettime */ \
- TFJ(DatePrototypeGetTime, 0, kReceiver) \
+ TFJ(DatePrototypeGetTime, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettimezoneoffset */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0, kReceiver) \
+ TFJ(DatePrototypeGetTimezoneOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcdate */ \
- TFJ(DatePrototypeGetUTCDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcday */ \
- TFJ(DatePrototypeGetUTCDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcfullyear */ \
- TFJ(DatePrototypeGetUTCFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutchours */ \
- TFJ(DatePrototypeGetUTCHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmilliseconds */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcminutes */ \
- TFJ(DatePrototypeGetUTCMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmonth */ \
- TFJ(DatePrototypeGetUTCMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcseconds */ \
- TFJ(DatePrototypeGetUTCSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.valueof */ \
- TFJ(DatePrototypeValueOf, 0, kReceiver) \
+ TFJ(DatePrototypeValueOf, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype-@@toprimitive */ \
- TFJ(DatePrototypeToPrimitive, 1, kReceiver, kHint) \
+ TFJ(DatePrototypeToPrimitive, kJSArgcReceiverSlots + 1, kReceiver, kHint) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -578,9 +602,9 @@ namespace internal {
CPP(GlobalUnescape) \
CPP(GlobalEval) \
/* ES6 #sec-isfinite-number */ \
- TFJ(GlobalIsFinite, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsFinite, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
/* ES6 #sec-isnan-number */ \
- TFJ(GlobalIsNaN, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsNaN, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -643,23 +667,23 @@ namespace internal {
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
- TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
- TFJ(MapPrototypeHas, 1, kReceiver, kKey) \
+ TFJ(MapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(MapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(MapPrototypeClear) \
/* ES #sec-map.prototype.entries */ \
- TFJ(MapPrototypeEntries, 0, kReceiver) \
+ TFJ(MapPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-map.prototype.size */ \
- TFJ(MapPrototypeGetSize, 0, kReceiver) \
+ TFJ(MapPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
TFJ(MapPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
- TFJ(MapPrototypeKeys, 0, kReceiver) \
+ TFJ(MapPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.values */ \
- TFJ(MapPrototypeValues, 0, kReceiver) \
+ TFJ(MapPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%mapiteratorprototype%.next */ \
- TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(MapIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(MapIteratorToList, kSource) \
\
/* ES #sec-number-constructor */ \
@@ -731,28 +755,30 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- TFJ(ObjectEntries, 1, kReceiver, kObject) \
+ TFJ(ObjectEntries, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
- TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
+ TFJ(ObjectGetOwnPropertyNames, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
- TFJ(ObjectHasOwn, 2, kReceiver, kObject, kKey) \
- TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
+ TFJ(ObjectHasOwn, kJSArgcReceiverSlots + 2, kReceiver, kObject, kKey) \
+ TFJ(ObjectIs, kJSArgcReceiverSlots + 2, kReceiver, kLeft, kRight) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
- TFJ(ObjectKeys, 1, kReceiver, kObject) \
+ TFJ(ObjectKeys, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
/* ES6 #sec-object.prototype.hasownproperty */ \
- TFJ(ObjectPrototypeHasOwnProperty, 1, kReceiver, kKey) \
- TFJ(ObjectPrototypeIsPrototypeOf, 1, kReceiver, kValue) \
+ TFJ(ObjectPrototypeHasOwnProperty, kJSArgcReceiverSlots + 1, kReceiver, \
+ kKey) \
+ TFJ(ObjectPrototypeIsPrototypeOf, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
CPP(ObjectSeal) \
TFS(ObjectToString, kReceiver) \
- TFJ(ObjectValues, 1, kReceiver, kObject) \
+ TFJ(ObjectValues, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare) \
@@ -784,14 +810,16 @@ namespace internal {
CPP(RegExpCapture8Getter) \
CPP(RegExpCapture9Getter) \
/* ES #sec-regexp-pattern-flags */ \
- TFJ(RegExpConstructor, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpConstructor, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpInputGetter) \
CPP(RegExpInputSetter) \
CPP(RegExpLastMatchGetter) \
CPP(RegExpLastParenGetter) \
CPP(RegExpLeftContextGetter) \
/* ES #sec-regexp.prototype.compile */ \
- TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpPrototypeCompile, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpPrototypeToString) \
CPP(RegExpRightContextGetter) \
\
@@ -803,20 +831,20 @@ namespace internal {
\
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
- TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(SetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(SetPrototypeClear) \
/* ES #sec-set.prototype.entries */ \
- TFJ(SetPrototypeEntries, 0, kReceiver) \
+ TFJ(SetPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-set.prototype.size */ \
- TFJ(SetPrototypeGetSize, 0, kReceiver) \
+ TFJ(SetPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
TFJ(SetPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
- TFJ(SetPrototypeValues, 0, kReceiver) \
+ TFJ(SetPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
- TFJ(SetIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(SetIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
@@ -825,16 +853,18 @@ namespace internal {
/* https://tc39.es/proposal-resizablearraybuffer/ */ \
CPP(SharedArrayBufferPrototypeGrow) \
\
- TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
- TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsCompareExchange, 4, kReceiver, kArray, kIndex, kOldValue, \
- kNewValue) \
- TFJ(AtomicsAdd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsSub, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsLoad, kJSArgcReceiverSlots + 2, kReceiver, kArray, kIndex) \
+ TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray, \
+ kIndex, kOldValue, kNewValue) \
+ TFJ(AtomicsAdd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsSub, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsAnd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsOr, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsXor, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
CPP(AtomicsNotify) \
CPP(AtomicsIsLockFree) \
CPP(AtomicsWait) \
@@ -848,11 +878,12 @@ namespace internal {
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES #sec-string.prototype.matchAll */ \
- TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp) \
+ TFJ(StringPrototypeMatchAll, kJSArgcReceiverSlots + 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.replace */ \
- TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
+ TFJ(StringPrototypeReplace, kJSArgcReceiverSlots + 2, kReceiver, kSearch, \
+ kReplace) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
@@ -868,15 +899,15 @@ namespace internal {
\
/* TypedArray */ \
/* ES #sec-typedarray-constructors */ \
- TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
+ TFJ(TypedArrayBaseConstructor, kJSArgcReceiverSlots, kReceiver) \
TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
- TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.byteoffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.copywithin */ \
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
@@ -890,7 +921,7 @@ namespace internal {
/* ES6 #sec-%typedarray%.prototype.reverse */ \
CPP(TypedArrayPrototypeReverse) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
- TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeToStringTag, kJSArgcReceiverSlots, kReceiver) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
@@ -908,16 +939,16 @@ namespace internal {
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
- TFJ(WeakMapGet, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(WeakMapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
\
/* WeakSet */ \
TFJ(WeakSetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(WeakSetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
- TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakSetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* WeakSet / WeakMap Helpers */ \
TFS(WeakCollectionDelete, kCollection, kKey) \
@@ -948,12 +979,18 @@ namespace internal {
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
TFS(AsyncGeneratorAwaitCaught, kAsyncGeneratorObject, kValue) \
TFS(AsyncGeneratorAwaitUncaught, kAsyncGeneratorObject, kValue) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorYieldResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorReturnClosedResolveClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnClosedRejectClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
\
/* Async-from-Sync Iterator */ \
\
@@ -966,7 +1003,7 @@ namespace internal {
/* #sec-%asyncfromsynciteratorprototype%.return */ \
TFJ(AsyncFromSyncIteratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* #sec-async-iterator-value-unwrap-functions */ \
- TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
+ TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* CEntry */ \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
@@ -1009,6 +1046,10 @@ namespace internal {
CPP(CallAsyncModuleFulfilled) \
CPP(CallAsyncModuleRejected)
+#define BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_BASE_TIER1(CPP, TFJ, TFC, TFS, TFH, ASM)
+
#ifdef V8_INTL_SUPPORT
#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
/* ecma402 #sec-intl.collator */ \
@@ -1053,6 +1094,8 @@ namespace internal {
CPP(DisplayNamesSupportedLocalesOf) \
/* ecma402 #sec-intl.getcanonicallocales */ \
CPP(IntlGetCanonicalLocales) \
+ /* ecma402 #sec-intl.supportedvaluesof */ \
+ CPP(IntlSupportedValuesOf) \
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
@@ -1156,7 +1199,7 @@ namespace internal {
/* ecma402 #sup-string.prototype.tolocaleuppercase */ \
CPP(StringPrototypeToLocaleUpperCase) \
/* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
+ TFJ(StringPrototypeToLowerCaseIntl, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-string.prototype.touppercase */ \
CPP(StringPrototypeToUpperCaseIntl) \
TFS(StringToLowerCaseIntl, kString) \
@@ -1193,6 +1236,17 @@ namespace internal {
BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
+// See the comment on top of BUILTIN_LIST_BASE_TIER0 for an explanation of
+// tiers.
+#define BUILTIN_LIST_TIER0(CPP, TFJ, TFC, TFS, TFH, BCH, ASM) \
+ BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM)
+
+#define BUILTIN_LIST_TIER1(CPP, TFJ, TFC, TFS, TFH, BCH, ASM) \
+ BUILTIN_LIST_BASE_TIER1(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+ BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
+
// The exceptions thrown in the following builtins are caught
// internally and result in a promise rejection.
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
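The tier split in this file is a plain X-macro composition: BUILTIN_LIST_BASE is reassembled from the tier-0 and tier-1 sublists, so existing consumers expand the same overall list, while the new BUILTIN_LIST_TIER0/TIER1 macros let callers expand a single tier, which is what keeps the tier-0 builtins packed next to the root register. A schematic sketch of the pattern with made-up builtin names (only the list shapes are taken from the hunks above):

#include <cstdio>

// Hypothetical, minimal version of the tier-0/tier-1 X-macro layering.
#define DEMO_LIST_TIER0(ASM, TFC) \
  ASM(DeoptEntry)                 \
  TFC(RecordWrite)

#define DEMO_LIST_TIER1(ASM, TFC) \
  TFC(ToNumber)                   \
  TFC(ArrayPush)

// Full list = tier 0 followed by tier 1, exactly like BUILTIN_LIST_BASE.
#define DEMO_LIST(ASM, TFC) \
  DEMO_LIST_TIER0(ASM, TFC) \
  DEMO_LIST_TIER1(ASM, TFC)

// One expansion counts tier-0 entries; another enumerates the whole table,
// with the tier-0 entries first, which is how the ordering guarantee holds.
#define COUNT_ONE(Name) +1
#define PRINT_NAME(Name) std::puts(#Name);

constexpr int kTier0Count = 0 DEMO_LIST_TIER0(COUNT_ONE, COUNT_ONE);

int main() {
  DEMO_LIST(PRINT_NAME, PRINT_NAME)  // tier-0 entries are printed first
  std::printf("tier-0 builtins: %d\n", kTier0Count);
  return 0;
}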
diff --git a/chromium/v8/src/builtins/builtins-descriptors.h b/chromium/v8/src/builtins/builtins-descriptors.h
index c2eb44debea..12f7f58ec5c 100644
--- a/chromium/v8/src/builtins/builtins-descriptors.h
+++ b/chromium/v8/src/builtins/builtins-descriptors.h
@@ -14,19 +14,20 @@ namespace v8 {
namespace internal {
// Define interface descriptors for builtins with JS linkage.
-#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
- struct Builtin_##Name##_InterfaceDescriptor { \
- enum ParameterIndices { \
- kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
- ##__VA_ARGS__, \
- kJSNewTarget, \
- kJSActualArgumentsCount, \
- kContext, \
- kParameterCount, \
- }; \
- static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
- "Inconsistent set of arguments"); \
- static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ ##__VA_ARGS__, \
+ kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4 + \
+ kJSArgcReceiverSlots), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
// Define interface descriptors for builtins with StubCall linkage.
diff --git a/chromium/v8/src/builtins/builtins-generator-gen.cc b/chromium/v8/src/builtins/builtins-generator-gen.cc
index eb557b1ca15..cdae0cdd335 100644
--- a/chromium/v8/src/builtins/builtins-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-generator-gen.cc
@@ -74,7 +74,7 @@ void GeneratorBuiltinsAssembler::InnerResume(
  // The generator function should not close the generator by itself; let's
  // check that it is indeed not closed yet.
- CSA_ASSERT(this, SmiNotEqual(result_continuation, closed));
+ CSA_DCHECK(this, SmiNotEqual(result_continuation, closed));
TNode<Smi> executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
GotoIf(SmiEqual(result_continuation, executing), &if_final_return);
@@ -94,21 +94,21 @@ void GeneratorBuiltinsAssembler::InnerResume(
BIND(&if_receiverisclosed);
{
// The {receiver} is closed already.
- TNode<Object> result;
+ TNode<Object> builtin_result;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = CallBuiltin(Builtin::kCreateIterResultObject, context,
- UndefinedConstant(), TrueConstant());
+ builtin_result = CallBuiltin(Builtin::kCreateIterResultObject, context,
+ UndefinedConstant(), TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result = CallBuiltin(Builtin::kCreateIterResultObject, context, value,
- TrueConstant());
+ builtin_result = CallBuiltin(Builtin::kCreateIterResultObject, context,
+ value, TrueConstant());
break;
case JSGeneratorObject::kThrow:
- result = CallRuntime(Runtime::kThrow, context, value);
+ builtin_result = CallRuntime(Runtime::kThrow, context, value);
break;
}
- args->PopAndReturn(result);
+ args->PopAndReturn(builtin_result);
}
BIND(&if_receiverisrunning);
@@ -219,11 +219,10 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_DCHECK(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
@@ -274,11 +273,10 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_DCHECK(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
diff --git a/chromium/v8/src/builtins/builtins-internal-gen.cc b/chromium/v8/src/builtins/builtins-internal-gen.cc
index 49ad4b4e7c6..dc5a49640e8 100644
--- a/chromium/v8/src/builtins/builtins-internal-gen.cc
+++ b/chromium/v8/src/builtins/builtins-internal-gen.cc
@@ -323,12 +323,13 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&next);
- TNode<IntPtrT> object = BitcastTaggedToWord(
- UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
- Branch(
- IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
- &next, &call_incremental_wb);
-
+ {
+ TNode<IntPtrT> object = BitcastTaggedToWord(
+ UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+ Branch(
+ IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
+ &next, &call_incremental_wb);
+ }
BIND(&call_incremental_wb);
{
TNode<ExternalReference> function = ExternalConstant(
@@ -439,10 +440,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
- UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
@@ -483,6 +483,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
+class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<ExternalReference> GetExternalReference(int size) {
+ if (size == kInt8Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_8_bits());
+ } else if (size == kInt16Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_16_bits());
+ } else if (size == kInt32Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_32_bits());
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_64_bits());
+ }
+ }
+
+ void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
+ TNode<ExternalReference> function = GetExternalReference(size);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
+ TNode<IntPtrT> value = BitcastTaggedToWord(
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
+ CallCFunctionWithCallerSavedRegisters(
+ function, MachineType::Int32(), fp_mode,
+ std::make_pair(MachineType::IntPtr(), address),
+ std::make_pair(MachineType::IntPtr(), value));
+ Return(UndefinedConstant());
+ }
+};
+
+TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
+}
+
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -501,8 +568,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
@@ -777,7 +843,7 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
auto source = Parameter<Object>(Descriptor::kSource);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, TaggedNotEqual(target, source));
+ CSA_DCHECK(this, TaggedNotEqual(target, source));
Label if_runtime(this, Label::kDeferred);
Return(SetOrCopyDataProperties(context, target, source, &if_runtime, false));
@@ -888,21 +954,23 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
auto actual_argc =
UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CodeStubArguments args(this, actual_argc);
- TVARIABLE(Int32T, pushed_argc, actual_argc);
+ TVARIABLE(Int32T, pushed_argc,
+ TruncateIntPtrToInt32(args.GetLengthWithReceiver()));
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(target);
- TNode<Int32T> formal_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> formal_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// The number of arguments pushed is the maximum of actual arguments count
// and formal parameters count. Except when the formal parameters count is
// the sentinel.
Label check_argc(this), update_argc(this), done_argc(this);
- Branch(Word32Equal(formal_count, Int32Constant(kDontAdaptArgumentsSentinel)),
- &done_argc, &check_argc);
+ Branch(IsSharedFunctionInfoDontAdaptArguments(shared), &done_argc,
+ &check_argc);
BIND(&check_argc);
Branch(Int32GreaterThan(formal_count, pushed_argc.value()), &update_argc,
&done_argc);
@@ -915,7 +983,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
// including the receiver and the extra arguments.
TNode<Int32T> argc = Int32Add(
pushed_argc.value(),
- Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
+ Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
TNode<Code> code =
@@ -982,9 +1050,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
-TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
+TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) {
auto message = Parameter<String>(Descriptor::kMessageOrMessageId);
- TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
+ TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
@@ -1053,9 +1121,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
@@ -1168,7 +1234,7 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
GotoIf(TaggedEqual(on_non_existent,
SmiConstant(OnNonExistent::kThrowReferenceError)),
&throw_reference_error);
- CSA_ASSERT(this, TaggedEqual(on_non_existent,
+ CSA_DCHECK(this, TaggedEqual(on_non_existent,
SmiConstant(OnNonExistent::kReturnUndefined)));
Return(UndefinedConstant());
@@ -1241,17 +1307,17 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
- TNode<Int32T> parameter_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> parameter_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// This builtin intercepts a call to {function}, where the number of arguments
// pushed is the maximum of actual arguments count and formal parameters
// count.
Label argc_lt_param_count(this), argc_ge_param_count(this);
- Branch(IntPtrLessThan(args.GetLength(), ChangeInt32ToIntPtr(parameter_count)),
+ Branch(IntPtrLessThan(args.GetLengthWithReceiver(),
+ ChangeInt32ToIntPtr(parameter_count)),
&argc_lt_param_count, &argc_ge_param_count);
BIND(&argc_lt_param_count);
- PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
- maybe_result_or_smi_zero);
+ PopAndReturn(parameter_count, maybe_result_or_smi_zero);
BIND(&argc_ge_param_count);
args.PopAndReturn(maybe_result_or_smi_zero);
diff --git a/chromium/v8/src/builtins/builtins-intl-gen.cc b/chromium/v8/src/builtins/builtins-intl-gen.cc
index 6a9e0fbad4f..dd0410ccd21 100644
--- a/chromium/v8/src/builtins/builtins-intl-gen.cc
+++ b/chromium/v8/src/builtins/builtins-intl-gen.cc
@@ -29,7 +29,7 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
TNode<JSArray> AllocateEmptyJSArray(TNode<Context> context);
TNode<IntPtrT> PointerToSeqStringData(TNode<String> seq_string) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
STATIC_ASSERT(SeqOneByteString::kHeaderSize ==
SeqTwoByteString::kHeaderSize);
@@ -55,7 +55,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
to_direct.TryToDirect(&runtime);
const TNode<Int32T> instance_type = to_direct.instance_type();
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));
GotoIfNot(IsOneByteStringInstanceType(instance_type), &runtime);
diff --git a/chromium/v8/src/builtins/builtins-intl.cc b/chromium/v8/src/builtins/builtins-intl.cc
index c3711898c30..6fd36dd8e06 100644
--- a/chromium/v8/src/builtins/builtins-intl.cc
+++ b/chromium/v8/src/builtins/builtins-intl.cc
@@ -30,6 +30,7 @@
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/js-segments-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "unicode/brkiter.h"
@@ -78,9 +79,9 @@ BUILTIN(NumberFormatSupportedLocalesOf) {
}
BUILTIN(NumberFormatPrototypeFormatToParts) {
- const char* const method = "Intl.NumberFormat.prototype.formatToParts";
+ const char* const method_name = "Intl.NumberFormat.prototype.formatToParts";
HandleScope handle_scope(isolate);
- CHECK_RECEIVER(JSNumberFormat, number_format, method);
+ CHECK_RECEIVER(JSNumberFormat, number_format, method_name);
Handle<Object> x;
if (args.length() >= 2) {
@@ -95,9 +96,10 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
}
BUILTIN(DateTimeFormatPrototypeResolvedOptions) {
- const char* const method = "Intl.DateTimeFormat.prototype.resolvedOptions";
+ const char* const method_name =
+ "Intl.DateTimeFormat.prototype.resolvedOptions";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, format_holder, method);
+ CHECK_RECEIVER(JSReceiver, format_holder, method_name);
// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
Handle<JSDateTimeFormat> date_time_format;
@@ -121,15 +123,15 @@ BUILTIN(DateTimeFormatSupportedLocalesOf) {
}
BUILTIN(DateTimeFormatPrototypeFormatToParts) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatToParts";
+ const char* const method_name = "Intl.DateTimeFormat.prototype.formatToParts";
HandleScope handle_scope(isolate);
- CHECK_RECEIVER(JSObject, date_format_holder, method);
+ CHECK_RECEIVER(JSObject, date_format_holder, method_name);
Factory* factory = isolate->factory();
if (!date_format_holder->IsJSDateTimeFormat()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- factory->NewStringFromAsciiChecked(method),
+ factory->NewStringFromAsciiChecked(method_name),
date_format_holder));
}
Handle<JSDateTimeFormat> dtf =
@@ -156,12 +158,12 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
// Common code for DateTimeFormatPrototypeFormtRange(|ToParts)
template <class T>
V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
- BuiltinArguments args, Isolate* isolate, const char* const method,
+ BuiltinArguments args, Isolate* isolate, const char* const method_name,
MaybeHandle<T> (*format)(Isolate*, Handle<JSDateTimeFormat>, double,
double)) {
// 1. Let dtf be this value.
// 2. If Type(dtf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSObject, date_format_holder, method);
+ CHECK_RECEIVER(JSObject, date_format_holder, method_name);
Factory* factory = isolate->factory();
@@ -170,7 +172,7 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
if (!date_format_holder->IsJSDateTimeFormat()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- factory->NewStringFromAsciiChecked(method),
+ factory->NewStringFromAsciiChecked(method_name),
date_format_holder));
}
Handle<JSDateTimeFormat> dtf =
@@ -206,16 +208,17 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
}
BUILTIN(DateTimeFormatPrototypeFormatRange) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatRange";
+ const char* const method_name = "Intl.DateTimeFormat.prototype.formatRange";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<String>(args, isolate, method,
+ return DateTimeFormatRange<String>(args, isolate, method_name,
JSDateTimeFormat::FormatRange);
}
BUILTIN(DateTimeFormatPrototypeFormatRangeToParts) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatRangeToParts";
+ const char* const method_name =
+ "Intl.DateTimeFormat.prototype.formatRangeToParts";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<JSArray>(args, isolate, method,
+ return DateTimeFormatRange<JSArray>(args, isolate, method_name,
JSDateTimeFormat::FormatRangeToParts);
}
@@ -236,7 +239,7 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
Handle<SharedFunctionInfo> info =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kNormalFunction);
- info->set_internal_formal_parameter_count(len);
+ info->set_internal_formal_parameter_count(JSParameterCount(len));
info->set_length(len);
return Factory::JSFunctionBuilder{isolate, info, context}
@@ -251,7 +254,8 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
template <class T>
Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
v8::Isolate::UseCounterFeature feature,
- Handle<Object> constructor, const char* method) {
+ Handle<Object> constructor,
+ const char* method_name) {
isolate->CountUsage(feature);
Handle<JSReceiver> new_target;
// 1. If NewTarget is undefined, let newTarget be the active
@@ -276,7 +280,7 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 3. Perform ? Initialize<T>(Format, locales, options).
Handle<T> format;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, format, T::New(isolate, map, locales, options, method));
+ isolate, format, T::New(isolate, map, locales, options, method_name));
// 4. Let this be the this value.
if (args.new_target()->IsUndefined(isolate)) {
Handle<Object> receiver = args.receiver();
@@ -290,10 +294,10 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
if (ordinary_has_instance_obj->BooleanValue(isolate)) {
if (!receiver->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked(method),
- receiver));
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name),
+ receiver));
}
Handle<JSReceiver> rec = Handle<JSReceiver>::cast(receiver);
// a. Perform ? DefinePropertyOrThrow(this,
@@ -324,15 +328,15 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
template <class T>
Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
v8::Isolate::UseCounterFeature feature,
- const char* method) {
+ const char* method_name) {
isolate->CountUsage(feature);
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(method)));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
@@ -356,7 +360,7 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
*/
template <class T>
Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate,
- const char* method) {
+ const char* method_name) {
Handle<JSReceiver> new_target;
if (args.new_target()->IsUndefined(isolate)) {
@@ -376,7 +380,7 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate,
isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
RETURN_RESULT_OR_FAILURE(isolate,
- T::New(isolate, map, locales, options, method));
+ T::New(isolate, map, locales, options, method_name));
}
} // namespace
@@ -430,11 +434,11 @@ BUILTIN(NumberFormatConstructor) {
BUILTIN(NumberFormatPrototypeResolvedOptions) {
HandleScope scope(isolate);
- const char* const method = "Intl.NumberFormat.prototype.resolvedOptions";
+ const char* const method_name = "Intl.NumberFormat.prototype.resolvedOptions";
// 1. Let nf be the this value.
// 2. If Type(nf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, number_format_holder, method);
+ CHECK_RECEIVER(JSReceiver, number_format_holder, method_name);
// 3. Let nf be ? UnwrapNumberFormat(nf)
Handle<JSNumberFormat> number_format;
@@ -446,12 +450,12 @@ BUILTIN(NumberFormatPrototypeResolvedOptions) {
}
BUILTIN(NumberFormatPrototypeFormatNumber) {
- const char* const method = "get Intl.NumberFormat.prototype.format";
+ const char* const method_name = "get Intl.NumberFormat.prototype.format";
HandleScope scope(isolate);
// 1. Let nf be the this value.
// 2. If Type(nf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, receiver, method);
+ CHECK_RECEIVER(JSReceiver, receiver, method_name);
// 3. Let nf be ? UnwrapNumberFormat(nf).
Handle<JSNumberFormat> number_format;
@@ -518,12 +522,12 @@ BUILTIN(DateTimeFormatConstructor) {
}
BUILTIN(DateTimeFormatPrototypeFormat) {
- const char* const method = "get Intl.DateTimeFormat.prototype.format";
+ const char* const method_name = "get Intl.DateTimeFormat.prototype.format";
HandleScope scope(isolate);
// 1. Let dtf be this value.
// 2. If Type(dtf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, receiver, method);
+ CHECK_RECEIVER(JSReceiver, receiver, method_name);
// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
Handle<JSDateTimeFormat> format;
@@ -576,6 +580,13 @@ BUILTIN(IntlGetCanonicalLocales) {
Intl::GetCanonicalLocales(isolate, locales));
}
+BUILTIN(IntlSupportedValuesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::SupportedValuesOf(isolate, locales));
+}
+
BUILTIN(ListFormatConstructor) {
HandleScope scope(isolate);
@@ -608,12 +619,12 @@ BUILTIN(LocaleConstructor) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
- const char* method = "Intl.Locale";
+ const char* method_name = "Intl.Locale";
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(method)));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
@@ -650,7 +661,7 @@ BUILTIN(LocaleConstructor) {
Handle<JSReceiver> options_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, options_object,
- Intl::CoerceOptionsToObject(isolate, options, method));
+ CoerceOptionsToObject(isolate, options, method_name));
RETURN_RESULT_OR_FAILURE(
isolate, JSLocale::New(isolate, map, locale_string, options_object));
@@ -946,14 +957,14 @@ BUILTIN(CollatorSupportedLocalesOf) {
}
BUILTIN(CollatorPrototypeCompare) {
- const char* const method = "get Intl.Collator.prototype.compare";
+ const char* const method_name = "get Intl.Collator.prototype.compare";
HandleScope scope(isolate);
// 1. Let collator be this value.
// 2. If Type(collator) is not Object, throw a TypeError exception.
// 3. If collator does not have an [[InitializedCollator]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSCollator, collator, method);
+ CHECK_RECEIVER(JSCollator, collator, method_name);
// 4. If collator.[[BoundCompare]] is undefined, then
Handle<Object> bound_compare(collator->bound_compare(), isolate);
@@ -1002,14 +1013,15 @@ BUILTIN(CollatorInternalCompare) {
// 7. Return CompareStrings(collator, X, Y).
icu::Collator* icu_collator = collator->icu_collator().raw();
CHECK_NOT_NULL(icu_collator);
- return *Intl::CompareStrings(isolate, *icu_collator, string_x, string_y);
+ return Smi::FromInt(
+ Intl::CompareStrings(isolate, *icu_collator, string_x, string_y));
}
// ecma402 #sec-%segmentiteratorprototype%.next
BUILTIN(SegmentIteratorPrototypeNext) {
- const char* const method = "%SegmentIterator.prototype%.next";
+ const char* const method_name = "%SegmentIterator.prototype%.next";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method_name);
RETURN_RESULT_OR_FAILURE(isolate,
JSSegmentIterator::Next(isolate, segment_iterator));
@@ -1061,9 +1073,9 @@ BUILTIN(SegmenterPrototypeSegment) {
// ecma402 #sec-%segmentsprototype%.containing
BUILTIN(SegmentsPrototypeContaining) {
- const char* const method = "%Segments.prototype%.containing";
+ const char* const method_name = "%Segments.prototype%.containing";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegments, segments, method);
+ CHECK_RECEIVER(JSSegments, segments, method_name);
Handle<Object> index = args.atOrUndefined(isolate, 1);
// 6. Let n be ? ToInteger(index).
@@ -1077,9 +1089,9 @@ BUILTIN(SegmentsPrototypeContaining) {
// ecma402 #sec-%segmentsprototype%-@@iterator
BUILTIN(SegmentsPrototypeIterator) {
- const char* const method = "%SegmentIsPrototype%[@@iterator]";
+ const char* const method_name = "%SegmentIsPrototype%[@@iterator]";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegments, segments, method);
+ CHECK_RECEIVER(JSSegments, segments, method_name);
RETURN_RESULT_OR_FAILURE(
isolate,
JSSegmentIterator::Create(isolate, segments->icu_break_iterator().raw(),
@@ -1101,10 +1113,11 @@ BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
}
BUILTIN(V8BreakIteratorPrototypeAdoptText) {
- const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+ const char* const method_name =
+ "get Intl.v8BreakIterator.prototype.adoptText";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_adopt_text(break_iterator->bound_adopt_text(), isolate);
if (!bound_adopt_text->IsUndefined(isolate)) {
@@ -1137,10 +1150,10 @@ BUILTIN(V8BreakIteratorInternalAdoptText) {
}
BUILTIN(V8BreakIteratorPrototypeFirst) {
- const char* const method = "get Intl.v8BreakIterator.prototype.first";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.first";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_first(break_iterator->bound_first(), isolate);
if (!bound_first->IsUndefined(isolate)) {
@@ -1167,10 +1180,10 @@ BUILTIN(V8BreakIteratorInternalFirst) {
}
BUILTIN(V8BreakIteratorPrototypeNext) {
- const char* const method = "get Intl.v8BreakIterator.prototype.next";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.next";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_next(break_iterator->bound_next(), isolate);
if (!bound_next->IsUndefined(isolate)) {
@@ -1196,10 +1209,10 @@ BUILTIN(V8BreakIteratorInternalNext) {
}
BUILTIN(V8BreakIteratorPrototypeCurrent) {
- const char* const method = "get Intl.v8BreakIterator.prototype.current";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.current";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_current(break_iterator->bound_current(), isolate);
if (!bound_current->IsUndefined(isolate)) {
@@ -1225,10 +1238,11 @@ BUILTIN(V8BreakIteratorInternalCurrent) {
}
BUILTIN(V8BreakIteratorPrototypeBreakType) {
- const char* const method = "get Intl.v8BreakIterator.prototype.breakType";
+ const char* const method_name =
+ "get Intl.v8BreakIterator.prototype.breakType";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_break_type(break_iterator->bound_break_type(), isolate);
if (!bound_break_type->IsUndefined(isolate)) {
diff --git a/chromium/v8/src/builtins/builtins-lazy-gen.cc b/chromium/v8/src/builtins/builtins-lazy-gen.cc
index 6ee50ac737f..2ef9aa07342 100644
--- a/chromium/v8/src/builtins/builtins-lazy-gen.cc
+++ b/chromium/v8/src/builtins/builtins-lazy-gen.cc
@@ -136,7 +136,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
&maybe_use_sfi_code);
// If it isn't undefined or fixed array it must be a feedback vector.
- CSA_ASSERT(this, IsFeedbackVector(feedback_cell_value));
+ CSA_DCHECK(this, IsFeedbackVector(feedback_cell_value));
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
@@ -146,7 +146,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code);
- CSA_ASSERT(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+ CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(sfi_code));
@@ -156,8 +156,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TVARIABLE(Code, code);
// Check if we have baseline code.
- GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
- &baseline);
+ GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
code = sfi_code;
Goto(&tailcall_code);
diff --git a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
index 64dd15bd1e4..6c677e922d9 100644
--- a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -55,7 +55,7 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<Context> native_context) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
return LoadExternalPointerFromObject(native_context,
NativeContext::kMicrotaskQueueOffset,
kNativeContextMicrotaskQueueTag);
@@ -105,7 +105,7 @@ TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::CalculateRingBufferOffset(
void MicrotaskQueueBuiltinsAssembler::PrepareForContext(
TNode<Context> native_context, Label* bailout) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
// Skip the microtask execution if the associated context is shutdown.
GotoIf(WordEqual(GetMicrotaskQueue(native_context), IntPtrConstant(0)),
@@ -117,7 +117,7 @@ void MicrotaskQueueBuiltinsAssembler::PrepareForContext(
void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> current_context, TNode<Microtask> microtask) {
- CSA_ASSERT(this, TaggedIsNotSmi(microtask));
+ CSA_DCHECK(this, TaggedIsNotSmi(microtask));
StoreRoot(RootIndex::kCurrentMicrotask, microtask);
TNode<IntPtrT> saved_entered_context_count = GetEnteredContextCount();
@@ -378,7 +378,7 @@ TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() {
void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
TNode<Context> native_context) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
auto ref = ExternalReference::handle_scope_implementer_address(isolate());
TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref));
diff --git a/chromium/v8/src/builtins/builtins-number.cc b/chromium/v8/src/builtins/builtins-number.cc
index f6ff61a7041..4eddf5358a7 100644
--- a/chromium/v8/src/builtins/builtins-number.cc
+++ b/chromium/v8/src/builtins/builtins-number.cc
@@ -111,7 +111,7 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
- const char* method = "Number.prototype.toLocaleString";
+ const char* method_name = "Number.prototype.toLocaleString";
isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString);
@@ -126,7 +126,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(method),
+ isolate->factory()->NewStringFromAsciiChecked(method_name),
isolate->factory()->Number_string()));
}
@@ -134,7 +134,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
RETURN_RESULT_OR_FAILURE(
isolate,
Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2), method));
+ args.atOrUndefined(isolate, 2), method_name));
#else
// Turn the {value} into a String.
return *isolate->factory()->NumberToString(value);
diff --git a/chromium/v8/src/builtins/builtins-object-gen.cc b/chromium/v8/src/builtins/builtins-object-gen.cc
index 68112e5bffd..3e56df803ae 100644
--- a/chromium/v8/src/builtins/builtins-object-gen.cc
+++ b/chromium/v8/src/builtins/builtins-object-gen.cc
@@ -249,8 +249,9 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
BIND(&if_has_enum_cache);
{
GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
- TNode<FixedArray> values_or_entries = CAST(AllocateFixedArray(
- PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> values_or_entries =
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ AllocationFlag::kAllowLargeObjectAllocation));
// If in case we have enum_cache,
// we can't detect accessor of object until loop through descriptors.
@@ -278,7 +279,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
{
// Currently, we will not invoke getters,
// so, map will not be changed.
- CSA_ASSERT(this, TaggedEqual(map, LoadMap(object)));
+ CSA_DCHECK(this, TaggedEqual(map, LoadMap(object)));
TNode<IntPtrT> descriptor_entry = var_descriptor_number.value();
TNode<Name> next_key =
LoadKeyByDescriptorEntry(descriptors, descriptor_entry);
@@ -293,7 +294,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
// If property is accessor, we escape fast path and call runtime.
GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
- CSA_ASSERT(this, IsPropertyKindData(kind));
+ CSA_DCHECK(this, IsPropertyKindData(kind));
// If desc is not undefined and desc.[[Enumerable]] is true, then skip to
// the next descriptor.
@@ -346,7 +347,7 @@ TNode<JSArray>
ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
TNode<Map> array_map, Label* if_empty) {
- CSA_ASSERT(this, IsJSArrayMap(array_map));
+ CSA_DCHECK(this, IsJSArrayMap(array_map));
GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
TNode<JSArray> array = AllocateJSArray(array_map, result, SmiTag(size));
@@ -436,7 +437,9 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
Label done(this);
// 2. If only one argument was passed, return to.
- GotoIf(UintPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(1)), &done);
+ GotoIf(UintPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(1)),
+ &done);
// 3. Let sources be the List of argument values starting with the
// second argument.
@@ -475,7 +478,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
&if_slow);
// Ensure that the {object} doesn't have any elements.
- CSA_ASSERT(this, IsJSObjectMap(object_map));
+ CSA_DCHECK(this, IsJSObjectMap(object_map));
TNode<FixedArrayBase> object_elements = LoadElements(CAST(object));
GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
@@ -851,7 +854,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
BIND(&if_object);
{
- CSA_ASSERT(this, IsJSReceiver(CAST(receiver)));
+ CSA_DCHECK(this, IsJSReceiver(CAST(receiver)));
var_default = ObjectToStringConstant();
Goto(&checkstringtag);
}
@@ -866,7 +869,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
GotoIf(IsHeapNumberMap(receiver_map), &if_number);
GotoIf(IsSymbolMap(receiver_map), &if_symbol);
GotoIf(IsUndefined(receiver), &return_undefined);
- CSA_ASSERT(this, IsNull(receiver));
+ CSA_DCHECK(this, IsNull(receiver));
Return(NullToStringConstant());
BIND(&return_undefined);
@@ -978,7 +981,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadMapInstanceType(receiver_value_map);
GotoIf(IsBigIntInstanceType(receiver_value_instance_type),
&if_value_is_bigint);
- CSA_ASSERT(this, IsStringInstanceType(receiver_value_instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(receiver_value_instance_type));
Goto(&if_value_is_string);
BIND(&if_value_is_number);
@@ -1094,7 +1097,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&no_properties);
{
TVARIABLE(Map, map);
- TVARIABLE(HeapObject, properties);
+ TVARIABLE(HeapObject, new_properties);
Label null_proto(this), non_null_proto(this), instantiate_map(this);
Branch(IsNull(prototype), &null_proto, &non_null_proto);
@@ -1103,17 +1106,18 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
{
map = LoadSlowObjectWithNullPrototypeMap(native_context);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- properties =
+ new_properties =
AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity);
} else {
- properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
+ new_properties =
+ AllocateNameDictionary(NameDictionary::kInitialCapacity);
}
Goto(&instantiate_map);
}
BIND(&non_null_proto);
{
- properties = EmptyFixedArrayConstant();
+ new_properties = EmptyFixedArrayConstant();
map = LoadObjectFunctionInitialMap(native_context);
GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
&instantiate_map);
@@ -1131,7 +1135,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&instantiate_map);
{
TNode<JSObject> instance =
- AllocateJSObjectFromMap(map.value(), properties.value());
+ AllocateJSObjectFromMap(map.value(), new_properties.value());
args.PopAndReturn(instance);
}
}
@@ -1242,21 +1246,21 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(
LoadObjectField<Int32T>(bytecode_array, BytecodeArray::kFrameSizeOffset));
TNode<IntPtrT> size =
IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
- TNode<FixedArrayBase> parameters_and_registers =
- AllocateFixedArray(HOLEY_ELEMENTS, size, kAllowLargeObjectAllocation);
+ TNode<FixedArrayBase> parameters_and_registers = AllocateFixedArray(
+ HOLEY_ELEMENTS, size, AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), size, RootIndex::kUndefinedValue);
// TODO(cbruni): support start_offset to avoid double initialization.
- TNode<JSObject> result = AllocateJSObjectFromMap(
- map, base::nullopt, base::nullopt, kNone, kWithSlackTracking);
+ TNode<JSObject> result =
+ AllocateJSObjectFromMap(map, base::nullopt, base::nullopt,
+ AllocationFlag::kNone, kWithSlackTracking);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
closure);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
@@ -1293,7 +1297,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> object_input = args.GetOptionalArgumentValue(0);
@@ -1495,7 +1499,7 @@ TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
Goto(&return_desc);
BIND(&bailout);
- CSA_ASSERT(this, Int32Constant(0));
+ CSA_DCHECK(this, Int32Constant(0));
Unreachable();
}
diff --git a/chromium/v8/src/builtins/builtins-proxy-gen.cc b/chromium/v8/src/builtins/builtins-proxy-gen.cc
index 16304a56a54..29eec7c9f51 100644
--- a/chromium/v8/src/builtins/builtins-proxy-gen.cc
+++ b/chromium/v8/src/builtins/builtins-proxy-gen.cc
@@ -91,7 +91,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
auto proxy = Parameter<JSProxy>(Descriptor::kFunction);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsCallable(proxy));
+ CSA_DCHECK(this, IsCallable(proxy));
PerformStackCheck(context);
@@ -103,11 +103,11 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
// 2. If handler is null, throw a TypeError exception.
- CSA_ASSERT(this, IsNullOrJSReceiver(handler));
+ CSA_DCHECK(this, IsNullOrJSReceiver(handler));
GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
+ CSA_DCHECK(this, IsJSReceiver(handler));
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
@@ -121,10 +121,10 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
TNode<Object> receiver = args.GetReceiver();
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
TNode<Object> result = Call(context, trap, handler, target, receiver, array);
@@ -147,7 +147,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsCallable(proxy));
+ CSA_DCHECK(this, IsCallable(proxy));
Label throw_proxy_handler_revoked(this, Label::kDeferred),
trap_undefined(this), not_an_object(this, Label::kDeferred);
@@ -157,11 +157,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
// 2. If handler is null, throw a TypeError exception.
- CSA_ASSERT(this, IsNullOrJSReceiver(handler));
+ CSA_DCHECK(this, IsNullOrJSReceiver(handler));
GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
+ CSA_DCHECK(this, IsJSReceiver(handler));
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
@@ -174,10 +174,10 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CodeStubArguments args(this, argc_ptr);
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Let newObj be ? Call(trap, handler, « target, argArray, newTarget »).
TNode<Object> new_obj =
@@ -198,7 +198,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
BIND(&trap_undefined);
{
// 6.a. Assert: target has a [[Construct]] internal method.
- CSA_ASSERT(this, IsConstructor(CAST(target)));
+ CSA_DCHECK(this, IsConstructor(CAST(target)));
// 6.b. Return ? Construct(target, argumentsList, newTarget).
TailCallStub(CodeFactory::Construct(isolate()), context, target, new_target,
diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.cc b/chromium/v8/src/builtins/builtins-regexp-gen.cc
index 535188c567e..0a75e1bebd4 100644
--- a/chromium/v8/src/builtins/builtins-regexp-gen.cc
+++ b/chromium/v8/src/builtins/builtins-regexp-gen.cc
@@ -18,6 +18,7 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
@@ -88,9 +89,9 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<JSRegExp> regexp, TNode<Number> last_index,
TNode<BoolT> has_indices, TNode<FixedArray>* elements_out) {
- CSA_ASSERT(this, SmiLessThanOrEqual(
+ CSA_DCHECK(this, SmiLessThanOrEqual(
length, SmiConstant(JSArray::kMaxFastArrayLength)));
- CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0)));
+ CSA_DCHECK(this, SmiGreaterThan(length, SmiConstant(0)));
// Allocate.
@@ -111,7 +112,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResult::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation, JSRegExpResult::kSize);
Goto(&allocated);
}
@@ -123,7 +124,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResultWithIndices::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation,
+ JSRegExpResultWithIndices::kSize);
Goto(&allocated);
}
@@ -259,19 +261,19 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
{
TNode<IntPtrT> from_cursor = var_from_cursor.value();
TNode<IntPtrT> to_cursor = var_to_cursor.value();
- TNode<Smi> start =
+ TNode<Smi> start_cursor =
CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor));
Label next_iter(this);
- GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
+ GotoIf(SmiEqual(start_cursor, SmiConstant(-1)), &next_iter);
TNode<IntPtrT> from_cursor_plus1 =
IntPtrAdd(from_cursor, IntPtrConstant(1));
- TNode<Smi> end =
+ TNode<Smi> end_cursor =
CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor_plus1));
- TNode<String> capture =
- CAST(CallBuiltin(Builtin::kSubString, context, string, start, end));
+ TNode<String> capture = CAST(CallBuiltin(Builtin::kSubString, context,
+ string, start_cursor, end_cursor));
UnsafeStoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -284,7 +286,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
BIND(&named_captures);
{
- CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+ CSA_DCHECK(this, SmiGreaterThan(num_results, SmiConstant(1)));
// Preparations for named capture properties. Exit early if the result does
// not have any named captures to minimize performance impact.
@@ -294,7 +296,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// We reach this point only if captures exist, implying that the assigned
// regexp engine must be able to handle captures.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(
SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
@@ -312,7 +314,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<FixedArray> names = CAST(maybe_names);
TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
- CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
+ CSA_DCHECK(this, IntPtrGreaterThan(names_length, IntPtrZero()));
// Stash names in case we need them to build the indices array later.
StoreObjectField(result, JSRegExpResult::kNamesOffset, names);
@@ -328,8 +330,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties = AllocateSwissNameDictionary(num_properties);
} else {
- properties =
- AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
+ properties = AllocateNameDictionary(
+ num_properties, AllocationFlag::kAllowLargeObjectAllocation);
}
TNode<JSObject> group_object = AllocateJSObjectFromMap(map, properties);
@@ -337,10 +339,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TVARIABLE(IntPtrT, var_i, IntPtrZero());
- Label loop(this, &var_i);
+ Label inner_loop(this, &var_i);
- Goto(&loop);
- BIND(&loop);
+ Goto(&inner_loop);
+ BIND(&inner_loop);
{
TNode<IntPtrT> i = var_i.value();
TNode<IntPtrT> i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
@@ -370,7 +372,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
var_i = i_plus_2;
Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length),
- &maybe_build_indices, &loop);
+ &maybe_build_indices, &inner_loop);
BIND(&add_dictionary_property_slow);
// If the dictionary needs resizing, the above Add call will jump here
@@ -435,8 +437,6 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// External constants.
TNode<ExternalReference> isolate_address =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- TNode<ExternalReference> regexp_stack_memory_top_address = ExternalConstant(
- ExternalReference::address_of_regexp_stack_memory_top_address(isolate()));
TNode<ExternalReference> static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
@@ -447,8 +447,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
Label if_failure(this);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
+ CSA_DCHECK(this, IsNumberNormalized(last_index));
+ CSA_DCHECK(this, IsNumberPositive(last_index));
GotoIf(TaggedIsNotSmi(last_index), &if_failure);
TNode<IntPtrT> int_string_length = LoadStringLengthAsWord(string);
@@ -545,7 +545,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
{
Label next(this);
GotoIfNot(TaggedIsSmi(var_code.value()), &next);
- CSA_ASSERT(this, SmiEqual(CAST(var_code.value()),
+ CSA_DCHECK(this, SmiEqual(CAST(var_code.value()),
SmiConstant(JSRegExp::kUninitializedValue)));
Goto(&next);
BIND(&next);
@@ -605,26 +605,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
MachineType arg5_type = type_int32;
TNode<Int32T> arg5 = SmiToInt32(register_count);
- // Argument 6: Start (high end) of backtracking stack memory area. This
- // argument is ignored in the interpreter.
- TNode<RawPtrT> stack_top = UncheckedCast<RawPtrT>(
- Load(MachineType::Pointer(), regexp_stack_memory_top_address));
-
- MachineType arg6_type = type_ptr;
- TNode<RawPtrT> arg6 = stack_top;
+ // Argument 6: Indicate that this is a direct call from JavaScript.
+ MachineType arg6_type = type_int32;
+ TNode<Int32T> arg6 = Int32Constant(RegExp::CallOrigin::kFromJs);
- // Argument 7: Indicate that this is a direct call from JavaScript.
- MachineType arg7_type = type_int32;
- TNode<Int32T> arg7 = Int32Constant(RegExp::CallOrigin::kFromJs);
+ // Argument 7: Pass current isolate address.
+ MachineType arg7_type = type_ptr;
+ TNode<ExternalReference> arg7 = isolate_address;
- // Argument 8: Pass current isolate address.
- MachineType arg8_type = type_ptr;
- TNode<ExternalReference> arg8 = isolate_address;
-
- // Argument 9: Regular expression object. This argument is ignored in native
+ // Argument 8: Regular expression object. This argument is ignored in native
// irregexp code.
- MachineType arg9_type = type_tagged;
- TNode<JSRegExp> arg9 = regexp;
+ MachineType arg8_type = type_tagged;
+ TNode<JSRegExp> arg8 = regexp;
// TODO(v8:11880): avoid roundtrips between cdc and code.
TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
@@ -639,8 +631,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
std::make_pair(arg1_type, arg1), std::make_pair(arg2_type, arg2),
std::make_pair(arg3_type, arg3), std::make_pair(arg4_type, arg4),
std::make_pair(arg5_type, arg5), std::make_pair(arg6_type, arg6),
- std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8),
- std::make_pair(arg9_type, arg9)));
+ std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8)));
// Check the result.
// We expect exactly one result since we force the called regexp to behave
@@ -660,7 +651,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
IntPtrConstant(RegExp::kInternalRegExpFallbackToExperimental)),
&retry_experimental);
- CSA_ASSERT(this, IntPtrEqual(int_result,
+ CSA_DCHECK(this, IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpRetry)));
Goto(&runtime);
}
@@ -737,7 +728,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<ExternalReference> pending_exception_address =
ExternalConstant(ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate()));
- CSA_ASSERT(this, IsTheHole(Load<Object>(pending_exception_address)));
+ CSA_DCHECK(this, IsTheHole(Load<Object>(pending_exception_address)));
#endif // DEBUG
CallRuntime(Runtime::kThrowStackOverflow, context);
Unreachable();
@@ -810,7 +801,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
TNode<Context> context, TNode<Object> object) {
- CSA_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_DCHECK(this, TaggedIsNotSmi(object));
return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object)));
}
@@ -819,7 +810,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
PrototypeCheckAssembler::Flags prototype_check_flags,
base::Optional<DescriptorIndexNameValue> additional_property_to_check,
Label* if_isunmodified, Label* if_ismodified) {
- CSA_ASSERT(this, TaggedEqual(LoadMap(object), map));
+ CSA_DCHECK(this, TaggedEqual(LoadMap(object), map));
GotoIfForceSlowPath(if_ismodified);
@@ -941,16 +932,16 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
auto match_info = Parameter<FixedArray>(Descriptor::kMatchInfo);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(last_index));
TNode<FixedArray> data = CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
SmiEqual(CAST(UnsafeLoadFixedArrayElement(data, JSRegExp::kTagIndex)),
SmiConstant(JSRegExp::ATOM)));
// Callers ensure that last_index is in-bounds.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(SmiUntag(last_index),
LoadStringLengthAsWord(subject_string)));
@@ -962,7 +953,7 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
//
// This is especially relevant for crbug.com/1075514: atom patterns are
// non-empty and thus guaranteed not to match at the end of the string.
- CSA_ASSERT(this, IntPtrGreaterThan(LoadStringLengthAsWord(needle_string),
+ CSA_DCHECK(this, IntPtrGreaterThan(LoadStringLengthAsWord(needle_string),
IntPtrConstant(0)));
const TNode<Smi> match_from =
@@ -974,8 +965,8 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
BIND(&if_success);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_from));
- CSA_ASSERT(this, UintPtrLessThan(SmiUntag(match_from),
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_from));
+ CSA_DCHECK(this, UintPtrLessThan(SmiUntag(match_from),
LoadStringLengthAsWord(subject_string)));
const int kNumRegisters = 2;
@@ -1010,8 +1001,8 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
auto match_info = Parameter<RegExpMatchInfo>(Descriptor::kMatchInfo);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
+ CSA_DCHECK(this, IsNumberNormalized(last_index));
+ CSA_DCHECK(this, IsNumberPositive(last_index));
Return(RegExpExecInternal(context, regexp, string, last_index, match_info));
}
@@ -1036,28 +1027,21 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
if (is_fastpath) {
// Refer to JSRegExp's flag property on the fast-path.
- CSA_ASSERT(this, IsJSRegExp(CAST(regexp)));
+ CSA_DCHECK(this, IsJSRegExp(CAST(regexp)));
const TNode<Smi> flags_smi =
CAST(LoadObjectField(CAST(regexp), JSRegExp::kFlagsOffset));
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
+#define CASE_FOR_FLAG(Lower, Camel, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
- CASE_FOR_FLAG(JSRegExp::kHasIndices);
- CASE_FOR_FLAG(JSRegExp::kGlobal);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
- CASE_FOR_FLAG(JSRegExp::kLinear);
- CASE_FOR_FLAG(JSRegExp::kMultiline);
- CASE_FOR_FLAG(JSRegExp::kDotAll);
- CASE_FOR_FLAG(JSRegExp::kUnicode);
- CASE_FOR_FLAG(JSRegExp::kSticky);
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
} else {
DCHECK(!is_fastpath);
@@ -1123,26 +1107,19 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TVARIABLE(IntPtrT, var_offset,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-#define CASE_FOR_FLAG(FLAG, CHAR) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- const TNode<Int32T> value = Int32Constant(CHAR); \
- StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
- var_offset.value(), value); \
- var_offset = IntPtrAdd(var_offset.value(), int_one); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
-
- CASE_FOR_FLAG(JSRegExp::kHasIndices, 'd');
- CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
- CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
- CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
- CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
- CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
- CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
+#define CASE_FOR_FLAG(Lower, Camel, LowerCamel, Char, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ const TNode<Int32T> value = Int32Constant(Char); \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
+ var_offset.value(), value); \
+ var_offset = IntPtrAdd(var_offset.value(), int_one); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
+
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
if (is_fastpath) {
@@ -1334,12 +1311,12 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
// {maybe_flags} must be undefined in this case, otherwise throw.
{
- Label next(this);
- GotoIf(IsUndefined(maybe_flags), &next);
+ Label maybe_flags_is_undefined(this);
+ GotoIf(IsUndefined(maybe_flags), &maybe_flags_is_undefined);
ThrowTypeError(context, MessageTemplate::kRegExpFlags);
- BIND(&next);
+ BIND(&maybe_flags_is_undefined);
}
const TNode<JSRegExp> pattern = CAST(maybe_pattern);
@@ -1391,29 +1368,12 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
switch (flag) {
case JSRegExp::kNone:
UNREACHABLE();
- case JSRegExp::kGlobal:
- name = isolate()->factory()->global_string();
- break;
- case JSRegExp::kIgnoreCase:
- name = isolate()->factory()->ignoreCase_string();
- break;
- case JSRegExp::kMultiline:
- name = isolate()->factory()->multiline_string();
- break;
- case JSRegExp::kDotAll:
- UNREACHABLE(); // Never called for dotAll.
- case JSRegExp::kSticky:
- name = isolate()->factory()->sticky_string();
- break;
- case JSRegExp::kUnicode:
- name = isolate()->factory()->unicode_string();
- break;
- case JSRegExp::kHasIndices:
- name = isolate()->factory()->has_indices_string();
- break;
- case JSRegExp::kLinear:
- name = isolate()->factory()->linear_string();
- break;
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ case JSRegExp::k##Camel: \
+ name = isolate()->factory()->LowerCamel##_string(); \
+ break;
+ REGEXP_FLAG_LIST(V)
+#undef V
}
TNode<Object> value = GetProperty(context, regexp, name);
@@ -1442,8 +1402,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
bool is_fastpath) {
- CSA_ASSERT(this, IsNumberNormalized(index));
- if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
+ CSA_DCHECK(this, IsNumberNormalized(index));
+ if (is_fastpath) CSA_DCHECK(this, TaggedIsPositiveSmi(index));
// Default to last_index + 1.
// TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated
@@ -1467,7 +1427,7 @@ TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
// Must be in Smi range on the fast path. We control the value of {index}
// on all call-sites and can never exceed the length of the string.
STATIC_ASSERT(String::kMaxLength + 2 < Smi::kMaxValue);
- CSA_ASSERT(this, TaggedIsPositiveSmi(index_plus_one));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(index_plus_one));
}
Label if_isunicode(this), out(this);
@@ -1554,8 +1514,8 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
const TNode<Smi> limit) {
- CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
- CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
+ CSA_DCHECK(this, IsFastRegExpPermissive(context, regexp));
+ CSA_DCHECK(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
const TNode<IntPtrT> int_limit = SmiUntag(limit);
@@ -1660,7 +1620,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
match_indices, RegExpMatchInfo::kFirstCaptureIndex));
const TNode<Smi> match_to = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
- CSA_ASSERT(this, SmiNotEqual(match_from, string_length));
+ CSA_DCHECK(this, SmiNotEqual(match_from, string_length));
// Advance index and continue if the match is empty.
{
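
Note on the regexp changes above: the hand-written per-flag cases are replaced by expansions of the REGEXP_FLAG_LIST X-macro, so every per-flag site is generated from one table. The following standalone C++ sketch (hypothetical MY_FLAG_LIST, Flag, and FlagsToString names, not the actual V8 definitions) shows the idiom: a single list macro drives both the bit-value enum and the flags-to-string loop, and adding a flag means touching only the list.

#include <cstdint>
#include <string>

#define MY_FLAG_LIST(V)                          \
  V(global, Global, global, 'g', 0)              \
  V(ignore_case, IgnoreCase, ignoreCase, 'i', 1) \
  V(multiline, Multiline, multiline, 'm', 2)

enum Flag : uint32_t {
#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1u << Bit,
  MY_FLAG_LIST(V)
#undef V
};

// Appends the single-character name of every set flag, mirroring the
// CASE_FOR_FLAG expansion in FlagsGetter above.
std::string FlagsToString(uint32_t flags) {
  std::string out;
#define V(Lower, Camel, LowerCamel, Char, Bit) \
  if (flags & k##Camel) out.push_back(Char);
  MY_FLAG_LIST(V)
#undef V
  return out;
}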
diff --git a/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index fa536792edf..154c6d39f88 100644
--- a/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -139,9 +139,9 @@ void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
//
// This function must always be called after ValidateIntegerTypedArray, which
// will ensure that LoadJSArrayBufferViewBuffer will not be null.
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
IsDetachedBuffer(LoadJSArrayBufferViewBuffer(array))));
- CSA_ASSERT(this, UintPtrLessThan(index, LoadJSTypedArrayLength(array)));
+ CSA_DCHECK(this, UintPtrLessThan(index, LoadJSTypedArrayLength(array)));
}
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
@@ -204,26 +204,28 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&u8);
- Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Uint8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&i16);
- Return(
- SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Int16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(
- SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Uint16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&i32);
- Return(ChangeInt32ToTagged(
- AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeInt32ToTagged(AtomicLoad<Int32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
BIND(&i64);
Goto(&u64);
@@ -235,12 +237,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
}
#else
BIND(&i64);
- Return(BigIntFromSigned64(
- AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
#endif
// This shouldn't happen, we've already validated the type.
@@ -307,18 +309,18 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&u8);
- AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
- value_word32);
+ AtomicStore(MachineRepresentation::kWord8, AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word, value_word32);
Return(value_integer);
BIND(&u16);
- AtomicStore(MachineRepresentation::kWord16, backing_store,
- WordShl(index_word, 1), value_word32);
+ AtomicStore(MachineRepresentation::kWord16, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 1), value_word32);
Return(value_integer);
BIND(&u32);
- AtomicStore(MachineRepresentation::kWord32, backing_store,
- WordShl(index_word, 2), value_word32);
+ AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 2), value_word32);
Return(value_integer);
BIND(&u64);
@@ -340,7 +342,8 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
TNode<UintPtrT> high = Is64() ? TNode<UintPtrT>() : var_high.value();
- AtomicStore64(backing_store, WordShl(index_word, 3), var_low.value(), high);
+ AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
+ WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint);
#endif
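
Note on the SharedArrayBuffer changes above: the AtomicLoad/AtomicStore/AtomicLoad64/AtomicStore64 helpers now take an explicit AtomicMemoryOrder, passed as kSeqCst at the Atomics.load and Atomics.store call sites. As a plain C++ analogy (std::atomic standing in for the CSA codegen, which this is not), sequentially consistent ordering is the strongest std::memory_order and is what those JavaScript operations require:

#include <atomic>
#include <cstdint>

int32_t LoadSeqCst(const std::atomic<int32_t>& cell) {
  return cell.load(std::memory_order_seq_cst);
}

void StoreSeqCst(std::atomic<int32_t>& cell, int32_t value) {
  cell.store(value, std::memory_order_seq_cst);
}

Making the order an explicit parameter lets the same helpers later emit weaker orderings where a caller can justify them.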
diff --git a/chromium/v8/src/builtins/builtins-string-gen.cc b/chromium/v8/src/builtins/builtins-string-gen.cc
index 61c1d8d3879..ceee7b0b949 100644
--- a/chromium/v8/src/builtins/builtins-string-gen.cc
+++ b/chromium/v8/src/builtins/builtins-string-gen.cc
@@ -167,8 +167,8 @@ void StringBuiltinsAssembler::StringEqual_Core(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, TNode<String> rhs,
TNode<Word32T> rhs_instance_type, TNode<IntPtrT> length, Label* if_equal,
Label* if_not_equal, Label* if_indirect) {
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Fast check to see if {lhs} and {rhs} refer to the same String object.
GotoIf(TaggedEqual(lhs, rhs), if_equal);
@@ -244,8 +244,8 @@ void StringBuiltinsAssembler::StringEqual_Loop(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, MachineType lhs_type,
TNode<String> rhs, TNode<Word32T> rhs_instance_type, MachineType rhs_type,
TNode<IntPtrT> length, Label* if_equal, Label* if_not_equal) {
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Compute the effective offset of the first character.
TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
@@ -341,7 +341,7 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
TNode<String> StringBuiltinsAssembler::StringAdd(
TNode<ContextOrEmptyContext> context, TNode<String> left,
TNode<String> right) {
- CSA_ASSERT(this, IsZeroOrContext(context));
+ CSA_DCHECK(this, IsZeroOrContext(context));
TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
@@ -540,7 +540,7 @@ TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
auto right = Parameter<String>(Descriptor::kRight);
TNode<ContextOrEmptyContext> context =
UncheckedParameter<ContextOrEmptyContext>(Descriptor::kContext);
- CSA_ASSERT(this, IsZeroOrContext(context));
+ CSA_DCHECK(this, IsZeroOrContext(context));
Return(StringAdd(context, left, right));
}
@@ -792,12 +792,12 @@ TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
CodeStubArguments arguments(this, argc);
TNode<Uint32T> unsigned_argc =
- Unsigned(TruncateIntPtrToInt32(arguments.GetLength()));
+ Unsigned(TruncateIntPtrToInt32(arguments.GetLengthWithoutReceiver()));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
- Branch(IntPtrEqual(arguments.GetLength(), IntPtrConstant(1)), &if_oneargument,
- &if_notoneargument);
+ Branch(IntPtrEqual(arguments.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &if_oneargument, &if_notoneargument);
BIND(&if_oneargument);
{
@@ -965,8 +965,8 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
TNode<Context> context, TNode<String> subject_string,
TNode<Smi> match_start_index, TNode<Smi> match_end_index,
TNode<String> replace_string) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_start_index));
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_end_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_start_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_end_index));
TVARIABLE(String, var_result, replace_string);
Label runtime(this), out(this);
@@ -984,7 +984,7 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
BIND(&runtime);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(dollar_index));
const TNode<Object> matched =
CallBuiltin(Builtin::kStringSubstring, context, subject_string,
@@ -1185,8 +1185,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
// TypeError exception.
GotoIf(TaggedIsSmi(maybe_regexp), &next);
TNode<HeapObject> heap_maybe_regexp = CAST(maybe_regexp);
- regexp_asm.BranchIfFastRegExp_Strict(context, heap_maybe_regexp, &fast,
- &slow);
+ regexp_asm.BranchIfFastRegExpForMatch(context, heap_maybe_regexp, &fast,
+ &slow);
BIND(&fast);
{
@@ -1260,7 +1260,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<NativeContext> context, TNode<String> subject_string,
TNode<Smi> subject_length, TNode<Number> limit_number) {
- CSA_ASSERT(this, SmiGreaterThan(subject_length, SmiConstant(0)));
+ CSA_DCHECK(this, SmiGreaterThan(subject_length, SmiConstant(0)));
Label done(this), call_runtime(this, Label::kDeferred),
fill_thehole_and_call_runtime(this, Label::kDeferred);
@@ -1299,7 +1299,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
// TODO(jkummerow): Implement a CSA version of
// DisallowGarbageCollection and use that to guard
// ToDirectStringAssembler.PointerToData().
- CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
+ CSA_DCHECK(this, WordEqual(to_direct.PointerToData(&call_runtime),
string_data));
TNode<Int32T> char_code =
UncheckedCast<Int32T>(Load(MachineType::Uint8(), string_data,
@@ -1434,9 +1434,10 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
TNode<Smi> length = smi_zero;
TNode<IntPtrT> capacity = IntPtrConstant(0);
- TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length);
+ TNode<JSArray> result_array =
+ AllocateJSArray(kind, array_map, capacity, length);
- args.PopAndReturn(result);
+ args.PopAndReturn(result_array);
}
}
@@ -1478,12 +1479,12 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
TNode<Int32T> trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
- CSA_SLOW_ASSERT(this,
+ CSA_SLOW_DCHECK(this,
Uint32GreaterThanOrEqual(lead, Int32Constant(0xD800)));
- CSA_SLOW_ASSERT(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(this,
+ CSA_SLOW_DCHECK(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
+ CSA_SLOW_DCHECK(this,
Uint32GreaterThanOrEqual(trail, Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(this, Uint32LessThan(trail, Int32Constant(0xE000)));
+ CSA_SLOW_DCHECK(this, Uint32LessThan(trail, Int32Constant(0xE000)));
switch (encoding) {
case UnicodeEncoding::UTF16:
@@ -1757,7 +1758,7 @@ TNode<String> StringBuiltinsAssembler::SubString(TNode<String> string,
BIND(&original_string_or_invalid_length);
{
- CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
+ CSA_DCHECK(this, IntPtrEqual(substr_length, string_length));
// Equal length - check if {from, to} == {0, str.length}.
GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
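
Note on the string builtin changes above: besides the CSA_ASSERT to CSA_DCHECK rename and the switch to GetLengthWithoutReceiver, the checks in LoadSurrogatePairAt assert that the lead unit lies in [0xD800, 0xDC00) and the trail unit in [0xDC00, 0xE000). A small standalone sketch of that arithmetic (hypothetical helper names, ordinary C++ rather than CSA):

#include <cstdint>

bool IsLeadSurrogate(uint32_t unit) { return unit >= 0xD800 && unit < 0xDC00; }
bool IsTrailSurrogate(uint32_t unit) { return unit >= 0xDC00 && unit < 0xE000; }

// Combines a valid lead/trail pair into the code point they encode.
uint32_t CombineSurrogatePair(uint32_t lead, uint32_t trail) {
  return 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
}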
diff --git a/chromium/v8/src/builtins/builtins-string.cc b/chromium/v8/src/builtins/builtins-string.cc
index 950cefd7baf..d94976bab21 100644
--- a/chromium/v8/src/builtins/builtins-string.cc
+++ b/chromium/v8/src/builtins/builtins-string.cc
@@ -140,21 +140,25 @@ BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare);
- const char* method = "String.prototype.localeCompare";
+ static const char* const kMethod = "String.prototype.localeCompare";
#ifdef V8_INTL_SUPPORT
- TO_THIS_STRING(str1, method);
+ TO_THIS_STRING(str1, kMethod);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
- RETURN_RESULT_OR_FAILURE(
- isolate, Intl::StringLocaleCompare(
- isolate, str1, str2, args.atOrUndefined(isolate, 2),
- args.atOrUndefined(isolate, 3), method));
+ base::Optional<int> result = Intl::StringLocaleCompare(
+ isolate, str1, str2, args.atOrUndefined(isolate, 2),
+ args.atOrUndefined(isolate, 3), kMethod);
+ if (!result.has_value()) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
+ return Smi::FromInt(result.value());
#else
DCHECK_LE(2, args.length());
- TO_THIS_STRING(str1, method);
+ TO_THIS_STRING(str1, kMethod);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
Object::ToString(isolate, args.at(1)));
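
Note on the localeCompare change above: Intl::StringLocaleCompare now returns base::Optional<int>, and the builtin converts an empty optional into the pending-exception path instead of wrapping the call in RETURN_RESULT_OR_FAILURE. A minimal sketch of that propagation pattern using std::optional (the sentinel value and function name are illustrative only):

#include <optional>

constexpr int kErrorSentinel = -1;  // stands in for returning the exception

int CompareResultOrSignalError(std::optional<int> result, bool* has_error) {
  if (!result.has_value()) {
    // The callee already recorded the error; do not fabricate a comparison
    // result, just tell the caller to take the failure path.
    *has_error = true;
    return kErrorSentinel;
  }
  *has_error = false;
  return *result;
}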
diff --git a/chromium/v8/src/builtins/builtins-string.tq b/chromium/v8/src/builtins/builtins-string.tq
index 4111155fd2f..ab2cf2696d9 100644
--- a/chromium/v8/src/builtins/builtins-string.tq
+++ b/chromium/v8/src/builtins/builtins-string.tq
@@ -32,7 +32,7 @@ transitioning macro ToStringImpl(context: Context, o: JSAny): String {
ThrowTypeError(MessageTemplate::kSymbolToString);
}
case (JSAny): {
- return runtime::ToString(context, o);
+ return runtime::ToString(context, result);
}
}
}
@@ -90,8 +90,8 @@ transitioning builtin StringToList(implicit context: Context)(string: String):
i = i + value.length_intptr;
arrayLength++;
}
- assert(arrayLength >= 0);
- assert(SmiTag(stringLength) >= arrayLength);
+ dcheck(arrayLength >= 0);
+ dcheck(SmiTag(stringLength) >= arrayLength);
array.length = arrayLength;
return array;
@@ -121,7 +121,7 @@ IfInBounds(String, uintptr, uintptr), IfOutOfBounds {
goto IfInBounds(string, index, length);
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
// Valid string indices fit into Smi range, so HeapNumber index is
// definitely an out of bounds case.
goto IfOutOfBounds;
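
Note on the Torque changes above: assert() becomes dcheck(), mirroring the CSA_ASSERT to CSA_DCHECK rename on the C++ side; both denote debug-only checks that are compiled away in release builds. A rough C++ illustration of that contract (MY_DCHECK is a made-up macro, not V8's DCHECK definition):

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define MY_DCHECK(condition)                                        \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
#define MY_DCHECK(condition) ((void)0)  // disappears in release builds
#endif

int ClampIndex(int index, int length) {
  MY_DCHECK(length >= 0);  // enforced only when DEBUG is defined
  return index < length ? index : length;
}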
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.cc b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
index a76650d052d..60f26c63dc1 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
@@ -65,9 +65,8 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- InitializeExternalPointerField(buffer, JSArrayBuffer::kBackingStoreOffset,
- PointerConstant(nullptr),
- kArrayBufferBackingStoreTag);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
+ PointerConstant(nullptr));
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
@@ -155,13 +154,13 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
// Default to zero if the {receiver}s buffer was detached / out of bounds.
- Label detached_or_oob(this), not_detached_or_oob(this);
- IsTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
- &not_detached_or_oob);
+ Label detached_or_oob(this), not_detached_nor_oob(this);
+ IsJSTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
+ &not_detached_nor_oob);
BIND(&detached_or_oob);
Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
- BIND(&not_detached_or_oob);
+ BIND(&not_detached_nor_oob);
Return(
ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(CAST(receiver))));
}
@@ -193,7 +192,10 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
TNode<BoolT> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
TNode<Int32T> kind) {
STATIC_ASSERT(BIGUINT64_ELEMENTS + 1 == BIGINT64_ELEMENTS);
- return IsElementsKindInRange(kind, BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS);
+ return Word32Or(
+ IsElementsKindInRange(kind, BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS),
+ IsElementsKindInRange(kind, RAB_GSAB_BIGUINT64_ELEMENTS,
+ RAB_GSAB_BIGINT64_ELEMENTS));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
@@ -256,9 +258,27 @@ TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
// If the typed array's buffer is detached, throw
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(obj), method_name);
+ // TODO(v8:11111): Throw if the RAB / GSAB is OOB.
return CAST(obj);
}
+TNode<UintPtrT> TypedArrayBuiltinsAssembler::ValidateTypedArrayAndGetLength(
+ TNode<Context> context, TNode<Object> obj, const char* method_name) {
+ // If it is not a typed array, throw
+ ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
+
+ Label detached_or_oob(this), not_detached_nor_oob(this);
+ TNode<UintPtrT> length =
+ LoadJSTypedArrayLengthAndCheckDetached(CAST(obj), &detached_or_oob);
+ Goto(&not_detached_nor_oob);
+
+ BIND(&detached_or_oob);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&not_detached_nor_oob);
+ return length;
+}
+
void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<RawPtrT> dest_ptr,
TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length) {
@@ -318,7 +338,7 @@ void TypedArrayBuiltinsAssembler::
CallCCopyFastNumberJSArrayElementsToTypedArray(
TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
TNode<UintPtrT> source_length, TNode<UintPtrT> offset) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array());
@@ -389,13 +409,14 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- BIND(&if_##type##array); \
- { \
- case_function(TYPE##_ELEMENTS, sizeof(ctype), 0); \
- Goto(&next); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, NON_RAB_GSAB_TYPE) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, sizeof(ctype), \
+ Context::NON_RAB_GSAB_TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
}
- RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
BIND(&if_unknown_type);
@@ -404,12 +425,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-void TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- TNode<JSTypedArray> holder) {
- InitializeExternalPointerField(
- holder, IntPtrConstant(JSTypedArray::kExternalPointerOffset));
-}
-
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
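
Note on the typed-array CSA changes above: IsBigInt64ElementsKind must now accept two disjoint contiguous ranges, because the resizable/growable (RAB/GSAB) elements kinds exist alongside the original ones. The shape of the predicate, with made-up enum values rather than V8's ElementsKind numbering:

// Assumed example values; only the "two adjacent pairs" structure matters.
enum Kind {
  kBigUint64 = 10,
  kBigInt64 = 11,
  kRabGsabBigUint64 = 20,
  kRabGsabBigInt64 = 21
};

bool IsBigInt64Kind(int kind) {
  return (kind >= kBigUint64 && kind <= kBigInt64) ||
         (kind >= kRabGsabBigUint64 && kind <= kRabGsabBigInt64);
}

The new ValidateTypedArrayAndGetLength follows the same pattern as ValidateTypedArray but also returns the length, throwing kDetachedOperation when the array is detached or out of bounds.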
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.h b/chromium/v8/src/builtins/builtins-typed-array-gen.h
index bb8a15ef021..2807745ecb9 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.h
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.h
@@ -49,6 +49,10 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> obj,
const char* method_name);
+ TNode<UintPtrT> ValidateTypedArrayAndGetLength(TNode<Context> context,
+ TNode<Object> obj,
+ const char* method_name);
+
void CallCMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
@@ -83,7 +87,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
- void AllocateJSTypedArrayExternalPointerEntry(TNode<JSTypedArray> holder);
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/chromium/v8/src/builtins/builtins-typed-array.cc b/chromium/v8/src/builtins/builtins-typed-array.cc
index d6be81615dd..a7827e7d9f0 100644
--- a/chromium/v8/src/builtins/builtins-typed-array.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array.cc
@@ -47,11 +47,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.copyWithin";
+ const char* method_name = "%TypedArray%.prototype.copyWithin";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
- int64_t len = array->length();
+ int64_t len = array->GetLength();
int64_t to = 0;
int64_t from = 0;
int64_t final = len;
@@ -80,11 +81,37 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
if (count <= 0) return *array;
// TypedArray buffer may have been transferred/detached during parameter
- // processing above. Return early in this case, to prevent potential UAF error
- // TODO(caitp): throw here, as though the full algorithm were performed (the
- // throw would have come from ecma262/#sec-integerindexedelementget)
- // (see )
- if (V8_UNLIKELY(array->WasDetached())) return *array;
+ // processing above.
+ if (V8_UNLIKELY(array->WasDetached())) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
+ }
+
+ if (V8_UNLIKELY(array->is_backed_by_rab())) {
+ bool out_of_bounds = false;
+ int64_t new_len = array->GetLengthOrOutOfBounds(out_of_bounds);
+ if (out_of_bounds) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
+ }
+ if (new_len < len) {
+ // We don't need to account for growing, since we only copy an already
+ // determined number of elements and growing won't change it. If to >
+ // new_len or from > new_len, the count below will be < 0, so we don't
+ // need to check them separately.
+ if (final > new_len) {
+ final = new_len;
+ }
+ count = std::min<int64_t>(final - from, new_len - to);
+ if (count <= 0) {
+ return *array;
+ }
+ }
+ }
// Ensure processed indexes are within array bounds
DCHECK_GE(from, 0);
@@ -113,9 +140,10 @@ BUILTIN(TypedArrayPrototypeFill) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.fill";
+ const char* method_name = "%TypedArray%.prototype.fill";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
@@ -147,17 +175,22 @@ BUILTIN(TypedArrayPrototypeFill) {
}
}
+ if (V8_UNLIKELY(array->WasDetached())) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
+ }
+
if (V8_UNLIKELY(array->IsVariableLength())) {
bool out_of_bounds = false;
array->GetLengthOrOutOfBounds(out_of_bounds);
if (out_of_bounds) {
const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method);
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
}
- } else if (V8_UNLIKELY(array->WasDetached())) {
- return *array;
}
int64_t count = end - start;
@@ -178,9 +211,10 @@ BUILTIN(TypedArrayPrototypeIncludes) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.includes";
+ const char* method_name = "%TypedArray%.prototype.includes";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
if (args.length() < 2) return ReadOnlyRoots(isolate).false_value();
@@ -195,10 +229,6 @@ BUILTIN(TypedArrayPrototypeIncludes) {
index = CapRelativeIndex(num, 0, len);
}
- // TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasDetached()))
- return ReadOnlyRoots(isolate).false_value();
-
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<bool> result =
@@ -211,9 +241,10 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.indexOf";
+ const char* method_name = "%TypedArray%.prototype.indexOf";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
@@ -226,7 +257,6 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
index = CapRelativeIndex(num, 0, len);
}
- // TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
@@ -241,9 +271,10 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.lastIndexOf";
+ const char* method_name = "%TypedArray%.prototype.lastIndexOf";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
@@ -275,9 +306,10 @@ BUILTIN(TypedArrayPrototypeReverse) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.reverse";
+ const char* method_name = "%TypedArray%.prototype.reverse";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
ElementsAccessor* elements = array->GetElementsAccessor();
elements->Reverse(*array);
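
Note on the TypedArray runtime builtins above: a detached buffer now raises a TypeError (kDetachedOperation) in copyWithin and fill instead of silently returning, the stale detach early-return in includes is dropped, and copyWithin re-clamps its copy window when a resizable (RAB-backed) buffer has shrunk during argument coercion. That clamping logic, isolated into a plain helper for illustration (names are not from the patch):

#include <algorithm>
#include <cstdint>

// If the tracked length dropped below the original length, shrink the window
// so the copy never reads or writes past the new end; a non-positive result
// means there is nothing left to copy.
int64_t ClampedCopyCount(int64_t from, int64_t to, int64_t final,
                         int64_t new_len) {
  if (final > new_len) final = new_len;
  return std::min<int64_t>(final - from, new_len - to);
}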
diff --git a/chromium/v8/src/builtins/builtins.cc b/chromium/v8/src/builtins/builtins.cc
index e2d3acef368..d0045b43d5d 100644
--- a/chromium/v8/src/builtins/builtins.cc
+++ b/chromium/v8/src/builtins/builtins.cc
@@ -92,7 +92,7 @@ BytecodeOffset Builtins::GetContinuationBytecodeOffset(Builtin builtin) {
DCHECK(Builtins::KindOf(builtin) == TFJ || Builtins::KindOf(builtin) == TFC ||
Builtins::KindOf(builtin) == TFS);
return BytecodeOffset(BytecodeOffset::kFirstBuiltinContinuationId +
- static_cast<int>(builtin));
+ ToInt(builtin));
}
Builtin Builtins::GetBuiltinFromBytecodeOffset(BytecodeOffset id) {
@@ -112,9 +112,9 @@ const char* Builtins::Lookup(Address pc) {
// May be called during initialization (disassembler).
if (!initialized_) return nullptr;
- for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
- ++builtin) {
- if (code(builtin).contains(isolate_, pc)) return name(builtin);
+ for (Builtin builtin_ix = Builtins::kFirst; builtin_ix <= Builtins::kLast;
+ ++builtin_ix) {
+ if (code(builtin_ix).contains(isolate_, pc)) return name(builtin_ix);
}
return nullptr;
}
@@ -182,7 +182,7 @@ Handle<Code> Builtins::code_handle(Builtin builtin) {
// static
int Builtins::GetStackParameterCount(Builtin builtin) {
DCHECK(Builtins::KindOf(builtin) == TFJ);
- return builtin_metadata[static_cast<int>(builtin)].data.parameter_count;
+ return builtin_metadata[ToInt(builtin)].data.parameter_count;
}
// static
@@ -224,7 +224,7 @@ bool Builtins::HasJSLinkage(Builtin builtin) {
// static
const char* Builtins::name(Builtin builtin) {
- int index = static_cast<int>(builtin);
+ int index = ToInt(builtin);
DCHECK(IsBuiltinId(index));
return builtin_metadata[index].name;
}
@@ -262,7 +262,7 @@ void Builtins::PrintBuiltinSize() {
// static
Address Builtins::CppEntryOf(Builtin builtin) {
DCHECK(Builtins::IsCpp(builtin));
- return builtin_metadata[static_cast<int>(builtin)].data.cpp_entry;
+ return builtin_metadata[ToInt(builtin)].data.cpp_entry;
}
// static
@@ -292,18 +292,24 @@ bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
}
// static
-void Builtins::InitializeBuiltinEntryTable(Isolate* isolate) {
- EmbeddedData d = EmbeddedData::FromBlob(isolate);
- Address* builtin_entry_table = isolate->builtin_entry_table();
- for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
- ++builtin) {
- // TODO(jgruber,chromium:1020986): Remove the CHECK once the linked issue is
- // resolved.
- CHECK(
- Builtins::IsBuiltinId(isolate->heap()->builtin(builtin).builtin_id()));
- DCHECK(isolate->heap()->builtin(builtin).is_off_heap_trampoline());
- builtin_entry_table[static_cast<int>(builtin)] =
- d.InstructionStartOfBuiltin(builtin);
+void Builtins::InitializeIsolateDataTables(Isolate* isolate) {
+ EmbeddedData embedded_data = EmbeddedData::FromBlob(isolate);
+ IsolateData* isolate_data = isolate->isolate_data();
+
+ // The entry table.
+ for (Builtin i = Builtins::kFirst; i <= Builtins::kLast; ++i) {
+ DCHECK(Builtins::IsBuiltinId(isolate->heap()->builtin(i).builtin_id()));
+ DCHECK(isolate->heap()->builtin(i).is_off_heap_trampoline());
+ isolate_data->builtin_entry_table()[ToInt(i)] =
+ embedded_data.InstructionStartOfBuiltin(i);
+ }
+
+ // T0 tables.
+ for (Builtin i = Builtins::kFirst; i <= Builtins::kLastTier0; ++i) {
+ const int ii = ToInt(i);
+ isolate_data->builtin_tier0_entry_table()[ii] =
+ isolate_data->builtin_entry_table()[ii];
+ isolate_data->builtin_tier0_table()[ii] = isolate_data->builtin_table()[ii];
}
}
@@ -314,10 +320,10 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
return; // No need to iterate the entire table in this case.
}
- Address* builtins = isolate->builtins_table();
+ Address* builtins = isolate->builtin_table();
int i = 0;
HandleScope scope(isolate);
- for (; i < static_cast<int>(Builtin::kFirstBytecodeHandler); i++) {
+ for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
Handle<AbstractCode> code(AbstractCode::cast(Object(builtins[i])), isolate);
PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG, code,
Builtins::name(FromInt(i))));
@@ -352,7 +358,7 @@ class OffHeapTrampolineGenerator {
// Generate replacement code that simply tail-calls the off-heap code.
DCHECK(!masm_.has_frame());
{
- FrameScope scope(&masm_, StackFrame::NONE);
+ FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE);
if (type == TrampolineType::kJump) {
masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
@@ -420,7 +426,7 @@ Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
Builtins::Kind Builtins::KindOf(Builtin builtin) {
DCHECK(IsBuiltinId(builtin));
- return builtin_metadata[static_cast<int>(builtin)].kind;
+ return builtin_metadata[ToInt(builtin)].kind;
}
// static
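
Note on builtins.cc above: InitializeBuiltinEntryTable becomes InitializeIsolateDataTables, which fills the full builtin entry table and then mirrors the tier-0 prefix into the dedicated tier-0 tables. A simplified sketch of that prefix copy, with plain arrays standing in for the IsolateData tables:

#include <array>
#include <cstdint>

template <size_t kFull, size_t kTier0>
void CopyTier0Prefix(const std::array<uintptr_t, kFull>& full_table,
                     std::array<uintptr_t, kTier0>& tier0_table) {
  static_assert(kTier0 <= kFull, "tier 0 builtins are a prefix of the list");
  for (size_t i = 0; i < kTier0; ++i) tier0_table[i] = full_table[i];
}

The repeated static_cast<int>(builtin) expressions are also replaced by the new Builtins::ToInt helper, which adds a DCHECK that the id is valid.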
diff --git a/chromium/v8/src/builtins/builtins.h b/chromium/v8/src/builtins/builtins.h
index 2d6f221ebdd..e606a3881e1 100644
--- a/chromium/v8/src/builtins/builtins.h
+++ b/chromium/v8/src/builtins/builtins.h
@@ -74,10 +74,14 @@ class Builtins {
#define ADD_ONE(Name, ...) +1
static constexpr int kBuiltinCount = 0 BUILTIN_LIST(
ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE);
+ static constexpr int kBuiltinTier0Count = 0 BUILTIN_LIST_TIER0(
+ ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE, ADD_ONE);
#undef ADD_ONE
static constexpr Builtin kFirst = static_cast<Builtin>(0);
static constexpr Builtin kLast = static_cast<Builtin>(kBuiltinCount - 1);
+ static constexpr Builtin kLastTier0 =
+ static_cast<Builtin>(kBuiltinTier0Count - 1);
static constexpr int kFirstWideBytecodeHandler =
static_cast<int>(Builtin::kFirstBytecodeHandler) +
@@ -96,11 +100,18 @@ class Builtins {
return static_cast<uint32_t>(maybe_id) <
static_cast<uint32_t>(kBuiltinCount);
}
+ static constexpr bool IsTier0(Builtin builtin) {
+ return builtin <= kLastTier0 && IsBuiltinId(builtin);
+ }
static constexpr Builtin FromInt(int id) {
DCHECK(IsBuiltinId(id));
return static_cast<Builtin>(id);
}
+ static constexpr int ToInt(Builtin id) {
+ DCHECK(IsBuiltinId(id));
+ return static_cast<int>(id);
+ }
// The different builtin kinds are documented in builtins-definitions.h.
enum Kind { CPP, TFJ, TFC, TFS, TFH, BCH, ASM };
@@ -195,9 +206,7 @@ class Builtins {
return kAllBuiltinsAreIsolateIndependent;
}
- // Initializes the table of builtin entry points based on the current contents
- // of the builtins table.
- static void InitializeBuiltinEntryTable(Isolate* isolate);
+ static void InitializeIsolateDataTables(Isolate* isolate);
// Emits a CodeCreateEvent for every builtin.
static void EmitCodeCreateEvents(Isolate* isolate);
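
Note on builtins.h above: the ADD_ONE counting idiom is extended to BUILTIN_LIST_TIER0 so the header can define kBuiltinTier0Count, kLastTier0, and IsTier0 at compile time. The idiom in isolation (hypothetical MY_LIST/MY_LIST_TIER0, not the real BUILTIN_LIST macros):

// Each list entry expands to "+1", so prefixing with 0 yields the entry count.
#define MY_LIST_TIER0(V) V(A) V(B)
#define MY_LIST(V) MY_LIST_TIER0(V) V(C) V(D)

#define ADD_ONE(Name) +1
constexpr int kCount = 0 MY_LIST(ADD_ONE);             // 4
constexpr int kTier0Count = 0 MY_LIST_TIER0(ADD_ONE);  // 2
#undef ADD_ONE

static_assert(kCount == 4 && kTier0Count == 2,
              "tier 0 entries are a prefix of the full list");

Because the tier-0 builtins come first in the list, IsTier0 reduces to an id comparison against kLastTier0.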
diff --git a/chromium/v8/src/builtins/cast.tq b/chromium/v8/src/builtins/cast.tq
index b12ea5d9fe4..c53c970f9c3 100644
--- a/chromium/v8/src/builtins/cast.tq
+++ b/chromium/v8/src/builtins/cast.tq
@@ -793,7 +793,7 @@ macro Is<A : type extends Object, B : type extends Object>(
macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
A {
- assert(Is<A>(o));
+ dcheck(Is<A>(o));
return %RawDownCast<A>(o);
}
@@ -803,12 +803,12 @@ macro UnsafeConstCast<T: type>(r: const &T):&T {
UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
RegExpMatchInfo {
- assert(Is<FixedArray>(o));
+ dcheck(Is<FixedArray>(o));
return %RawDownCast<RegExpMatchInfo>(o);
}
macro UnsafeCast<A : type extends WeakHeapObject>(o: A|Object): A {
- assert(IsWeakOrCleared(o));
+ dcheck(IsWeakOrCleared(o));
return %RawDownCast<A>(o);
}
diff --git a/chromium/v8/src/builtins/console.tq b/chromium/v8/src/builtins/console.tq
index c0daa19b6df..483b5422d82 100644
--- a/chromium/v8/src/builtins/console.tq
+++ b/chromium/v8/src/builtins/console.tq
@@ -12,7 +12,8 @@ javascript builtin FastConsoleAssert(
if (ToBoolean(arguments[0])) {
return Undefined;
} else {
- tail ConsoleAssert(target, newTarget, Convert<int32>(arguments.length));
+ tail ConsoleAssert(
+ target, newTarget, Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/chromium/v8/src/builtins/convert.tq b/chromium/v8/src/builtins/convert.tq
index c1c73d00601..6a3c157db8e 100644
--- a/chromium/v8/src/builtins/convert.tq
+++ b/chromium/v8/src/builtins/convert.tq
@@ -29,7 +29,7 @@ FromConstexpr<Smi, constexpr int31>(i: constexpr int31): Smi {
return %FromConstexpr<Smi>(i);
}
FromConstexpr<PositiveSmi, constexpr int31>(i: constexpr int31): PositiveSmi {
- assert(i >= 0);
+ dcheck(i >= 0);
return %FromConstexpr<PositiveSmi>(i);
}
FromConstexpr<String, constexpr string>(s: constexpr string): String {
@@ -180,6 +180,9 @@ Convert<uint8, intptr>(i: intptr): uint8 {
Convert<int8, intptr>(i: intptr): int8 {
return %RawDownCast<int8>(TruncateIntPtrToInt32(i) << 24 >> 24);
}
+Convert<uint16, uint32>(i: uint32): uint16 {
+ return %RawDownCast<uint16>(i & 0xFFFF);
+}
Convert<int32, uint8>(i: uint8): int32 {
return Signed(Convert<uint32>(i));
}
@@ -229,11 +232,11 @@ Convert<TaggedIndex, intptr>(i: intptr): TaggedIndex {
}
Convert<intptr, uintptr>(ui: uintptr): intptr {
const i = Signed(ui);
- assert(i >= 0);
+ dcheck(i >= 0);
return i;
}
Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi {
- assert(IsValidPositiveSmi(i));
+ dcheck(IsValidPositiveSmi(i));
return %RawDownCast<PositiveSmi>(SmiTag(i));
}
Convert<PositiveSmi, uintptr>(ui: uintptr): PositiveSmi labels IfOverflow {
diff --git a/chromium/v8/src/builtins/data-view.tq b/chromium/v8/src/builtins/data-view.tq
index 5f61a194728..4acc13b2239 100644
--- a/chromium/v8/src/builtins/data-view.tq
+++ b/chromium/v8/src/builtins/data-view.tq
@@ -6,6 +6,11 @@
namespace data_view {
+const kBuiltinNameByteLength: constexpr string =
+ 'DateView.prototype.byteLength';
+const kBuiltinNameByteOffset: constexpr string =
+ 'DateView.prototype.byteOffset';
+
macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
return 'DataView.prototype.getUint8';
@@ -85,9 +90,7 @@ javascript builtin DataViewPrototypeGetByteLength(
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_length');
if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteLength);
}
return Convert<Number>(dataView.byte_length);
}
@@ -98,9 +101,7 @@ javascript builtin DataViewPrototypeGetByteOffset(
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteOffset);
}
return Convert<Number>(dataView.byte_offset);
}
@@ -513,13 +514,14 @@ extern macro TruncateFloat64ToWord32(float64): uint32;
extern macro DataViewBuiltinsAssembler::StoreWord8(
RawPtr, uintptr, uint32): void;
-macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
+macro StoreDataView8(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32): void {
StoreWord8(buffer.backing_store_ptr, offset, value & 0xFF);
}
macro StoreDataView16(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = value & 0xFF;
@@ -536,7 +538,7 @@ macro StoreDataView16(
macro StoreDataView32(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = value & 0xFF;
@@ -559,7 +561,7 @@ macro StoreDataView32(
macro StoreDataView64(
buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = lowWord & 0xFF;
@@ -603,7 +605,7 @@ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
// on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
macro StoreDataViewBigInt(
buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
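
Note on data-view.tq above: DataView.prototype.byteLength and byteOffset now throw a TypeError on detached buffers instead of returning 0, and the store macros gain explicit ": void" return types. Those StoreDataView* macros split the value into bytes and write them in the requested order; the same logic in freestanding C++ (function name and raw-pointer signature are illustrative):

#include <cstdint>

// Writes a 32-bit value byte by byte in the requested byte order, independent
// of the host's native endianness.
void StoreUint32(uint8_t* p, uint32_t value, bool requested_little_endian) {
  const uint8_t b0 = value & 0xFF;
  const uint8_t b1 = (value >> 8) & 0xFF;
  const uint8_t b2 = (value >> 16) & 0xFF;
  const uint8_t b3 = (value >> 24) & 0xFF;
  if (requested_little_endian) {
    p[0] = b0; p[1] = b1; p[2] = b2; p[3] = b3;
  } else {
    p[0] = b3; p[1] = b2; p[2] = b1; p[3] = b0;
  }
}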
diff --git a/chromium/v8/src/builtins/finalization-registry.tq b/chromium/v8/src/builtins/finalization-registry.tq
index 389b9a5ce0a..72db154a6f5 100644
--- a/chromium/v8/src/builtins/finalization-registry.tq
+++ b/chromium/v8/src/builtins/finalization-registry.tq
@@ -22,7 +22,7 @@ macro SplitOffTail(weakCell: WeakCell): WeakCell|Undefined {
case (Undefined): {
}
case (tailIsNowAHead: WeakCell): {
- assert(tailIsNowAHead.prev == weakCell);
+ dcheck(tailIsNowAHead.prev == weakCell);
tailIsNowAHead.prev = Undefined;
}
}
@@ -37,7 +37,7 @@ PopClearedCell(finalizationRegistry: JSFinalizationRegistry): WeakCell|
return Undefined;
}
case (weakCell: WeakCell): {
- assert(weakCell.prev == Undefined);
+ dcheck(weakCell.prev == Undefined);
finalizationRegistry.cleared_cells = SplitOffTail(weakCell);
// If the WeakCell has an unregister token, remove the cell from the
@@ -55,7 +55,7 @@ PopClearedCell(finalizationRegistry: JSFinalizationRegistry): WeakCell|
}
transitioning macro PushCell(
- finalizationRegistry: JSFinalizationRegistry, cell: WeakCell) {
+ finalizationRegistry: JSFinalizationRegistry, cell: WeakCell): void {
cell.next = finalizationRegistry.active_cells;
typeswitch (finalizationRegistry.active_cells) {
case (Undefined): {
@@ -69,7 +69,7 @@ transitioning macro PushCell(
transitioning macro
FinalizationRegistryCleanupLoop(implicit context: Context)(
- finalizationRegistry: JSFinalizationRegistry, callback: Callable) {
+ finalizationRegistry: JSFinalizationRegistry, callback: Callable): void {
while (true) {
const weakCellHead = PopClearedCell(finalizationRegistry);
typeswitch (weakCellHead) {
@@ -118,9 +118,9 @@ FinalizationRegistryConstructor(
finalizationRegistry.flags =
SmiTag(FinalizationRegistryFlags{scheduled_for_cleanup: false});
// 7. Set finalizationRegistry.[[Cells]] to be an empty List.
- assert(finalizationRegistry.active_cells == Undefined);
- assert(finalizationRegistry.cleared_cells == Undefined);
- assert(finalizationRegistry.key_map == Undefined);
+ dcheck(finalizationRegistry.active_cells == Undefined);
+ dcheck(finalizationRegistry.cleared_cells == Undefined);
+ dcheck(finalizationRegistry.key_map == Undefined);
// 8. Return finalizationRegistry.
return finalizationRegistry;
}
diff --git a/chromium/v8/src/builtins/frame-arguments.tq b/chromium/v8/src/builtins/frame-arguments.tq
index 5f25c97dc3e..a877209b3e2 100644
--- a/chromium/v8/src/builtins/frame-arguments.tq
+++ b/chromium/v8/src/builtins/frame-arguments.tq
@@ -6,7 +6,11 @@
struct Arguments {
const frame: FrameWithArguments;
const base: RawPtr;
+ // length is the number of arguments without the receiver.
const length: intptr;
+ // actual_count is the actual number of arguments on the stack (depending on
+ // kJSArgcIncludesReceiver may or may not include the receiver).
+ const actual_count: intptr;
}
extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny;
@@ -35,7 +39,7 @@ struct FrameWithArgumentsInfo {
// This macro is should only be used in builtins that can be called from
// interpreted or JITted code, not from CSA/Torque builtins (the number of
// returned formal parameters would be wrong).
-// It is difficult to actually check/assert this, since interpreted or JITted
+// It is difficult to actually check/dcheck this, since interpreted or JITted
// frames are StandardFrames, but so are hand-written builtins. Doing that
// more refined check would be prohibitively expensive.
macro GetFrameWithArgumentsInfo(implicit context: Context)():
@@ -45,8 +49,8 @@ macro GetFrameWithArgumentsInfo(implicit context: Context)():
const f: JSFunction = frame.function;
const shared: SharedFunctionInfo = f.shared_function_info;
- const formalParameterCount: bint =
- Convert<bint>(Convert<int32>(shared.formal_parameter_count));
+ const formalParameterCount: bint = Convert<bint>(Convert<int32>(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared)));
// TODO(victorgomes): When removing the v8_disable_arguments_adaptor flag,
// FrameWithArgumentsInfo can be simplified, since the frame field already
// contains the argument count.
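
Note on frame-arguments.tq above: the Arguments struct now carries both length (always excluding the receiver) and actual_count (the raw stack count, which includes the receiver when kJSArgcIncludesReceiver is enabled), and callers such as FastConsoleAssert and FastFunctionPrototypeBind switch to actual_count. The relationship, in a standalone sketch with an assumed configuration flag:

constexpr bool kArgcIncludesReceiverSketch = true;  // assumption for the sketch

struct ArgumentsCounts {
  int actual_count;  // what is physically on the stack
  int length() const {
    // length never counts the receiver, regardless of the stack layout.
    return kArgcIncludesReceiverSketch ? actual_count - 1 : actual_count;
  }
};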
diff --git a/chromium/v8/src/builtins/frames.tq b/chromium/v8/src/builtins/frames.tq
index 03336bd464e..121c3bb3e11 100644
--- a/chromium/v8/src/builtins/frames.tq
+++ b/chromium/v8/src/builtins/frames.tq
@@ -21,7 +21,7 @@ FromConstexpr<FrameType, constexpr FrameType>(t: constexpr FrameType):
Cast<FrameType>(o: Object): FrameType
labels CastError {
if (TaggedIsNotSmi(o)) goto CastError;
- assert(
+ dcheck(
Convert<int32>(BitcastTaggedToWordForTagAndSmiBits(o)) <
Convert<int32>(kFrameTypeCount << kSmiTagSize));
return %RawDownCast<FrameType>(o);
@@ -66,8 +66,12 @@ operator '.caller' macro LoadCallerFromFrame(f: Frame): Frame {
const kStandardFrameArgCOffset: constexpr int31
generates 'StandardFrameConstants::kArgCOffset';
+const kJSArgcReceiverSlots: constexpr int31
+ generates 'kJSArgcReceiverSlots';
+
operator '.argument_count' macro LoadArgCFromFrame(f: Frame): intptr {
- return LoadIntptrFromFrame(f, kStandardFrameArgCOffset);
+ return LoadIntptrFromFrame(f, kStandardFrameArgCOffset) -
+ kJSArgcReceiverSlots;
}
type ContextOrFrameType = Context|FrameType;
diff --git a/chromium/v8/src/builtins/function.tq b/chromium/v8/src/builtins/function.tq
index e6ce7edfefd..4bd134e25f5 100644
--- a/chromium/v8/src/builtins/function.tq
+++ b/chromium/v8/src/builtins/function.tq
@@ -24,7 +24,8 @@ const kMinDescriptorsForFastBind:
constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind';
macro CheckAccessor(implicit context: Context)(
- array: DescriptorArray, index: constexpr int32, name: Name) labels Slow {
+ array: DescriptorArray, index: constexpr int32,
+ name: Name): void labels Slow {
const descriptor: DescriptorEntry = array.descriptors[index];
const key: Name|Undefined = descriptor.key;
if (!TaggedEqual(key, name)) goto Slow;
@@ -38,7 +39,7 @@ transitioning javascript builtin
FastFunctionPrototypeBind(
js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
target: JSFunction)(...arguments): JSAny {
- const argc: intptr = arguments.length;
+ const argc: intptr = arguments.actual_count;
try {
typeswitch (receiver) {
case (fn: JSFunction|JSBoundFunction): {
diff --git a/chromium/v8/src/builtins/growable-fixed-array-gen.cc b/chromium/v8/src/builtins/growable-fixed-array-gen.cc
index e242ced5c6a..46445822924 100644
--- a/chromium/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/chromium/v8/src/builtins/growable-fixed-array-gen.cc
@@ -67,7 +67,7 @@ TNode<JSArray> GrowableFixedArray::ToJSArray(const TNode<Context> context) {
TNode<IntPtrT> GrowableFixedArray::NewCapacity(
TNode<IntPtrT> current_capacity) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrGreaterThanOrEqual(current_capacity, IntPtrConstant(0)));
// Growth rate is analog to JSObject::NewElementsCapacity:
@@ -82,9 +82,9 @@ TNode<IntPtrT> GrowableFixedArray::NewCapacity(
TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
const TNode<IntPtrT> element_count, const TNode<IntPtrT> new_capacity) {
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
const TNode<FixedArray> from_array = var_array_.value();
diff --git a/chromium/v8/src/builtins/growable-fixed-array.tq b/chromium/v8/src/builtins/growable-fixed-array.tq
index af9418b0c91..202422c0d23 100644
--- a/chromium/v8/src/builtins/growable-fixed-array.tq
+++ b/chromium/v8/src/builtins/growable-fixed-array.tq
@@ -5,19 +5,19 @@
namespace growable_fixed_array {
// TODO(pwong): Support FixedTypedArrays.
struct GrowableFixedArray {
- macro Push(obj: Object) {
+ macro Push(obj: Object): void {
this.EnsureCapacity();
this.array.objects[this.length++] = obj;
}
macro ResizeFixedArray(newCapacity: intptr): FixedArray {
- assert(this.length >= 0);
- assert(newCapacity >= 0);
- assert(newCapacity >= this.length);
+ dcheck(this.length >= 0);
+ dcheck(newCapacity >= 0);
+ dcheck(newCapacity >= this.length);
const first: intptr = 0;
return ExtractFixedArray(this.array, first, this.length, newCapacity);
}
- macro EnsureCapacity() {
- assert(this.length <= this.capacity);
+ macro EnsureCapacity(): void {
+ dcheck(this.length <= this.capacity);
if (this.capacity == this.length) {
// Growth rate is analog to JSObject::NewElementsCapacity:
// new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
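
Note on the GrowableFixedArray changes above: besides assert to dcheck and the explicit ": void" return types, the growth comment documents the capacity rule analogous to JSObject::NewElementsCapacity. That rule as a plain helper, roughly 1.5x growth plus a constant slack:

#include <cstdint>

int64_t NewCapacity(int64_t current_capacity) {
  // new_capacity = (current_capacity + (current_capacity >> 1)) + 16
  return current_capacity + (current_capacity >> 1) + 16;
}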
diff --git a/chromium/v8/src/builtins/ia32/builtins-ia32.cc b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
index 7a8875fee9a..ad1d5ab39a2 100644
--- a/chromium/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
@@ -78,6 +78,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch1, Register scratch2,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch1, scratch2));
+ Register counter = scratch1;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ DCHECK(scratch2 != no_reg);
+ __ mov(scratch2, value);
+ value = Operand(scratch2, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ dec(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -109,7 +139,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(esi, eax, ecx);
+ // esi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, esi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +163,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -237,7 +272,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(edi, eax, ecx);
+ // edi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, edi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// Push implicit receiver.
__ movd(ecx, xmm0);
@@ -282,7 +320,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -497,17 +537,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Move(ecx, eax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- // Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
- __ push(Operand(scratch2, 0)); // dereference handle
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments to the stack.
+ // scratch1 (edx): Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, scratch1, eax, ecx, scratch2,
+ ArgumentsElementType::kHandle);
// Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
@@ -562,6 +596,16 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ bind(&done);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ mov(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -570,8 +614,16 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -641,6 +693,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(ecx, FieldOperand(
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(ecx);
+ }
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -677,7 +732,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
__ Pop(eax);
- __ CmpObjectType(ecx, BASELINE_DATA_TYPE, ecx);
+ __ CmpObjectType(ecx, CODET_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -757,7 +812,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ lea(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
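The LeaveInterpreterFrame hunk above changes only the displacement of the lea: when argc already counts the receiver, no extra pointer-sized slot needs to be added. A small sketch of the resulting byte count (not V8 code; names are illustrative):

#include <cstddef>
#include <cstdio>

constexpr size_t kSystemPointerSize = 4;  // ia32

// Bytes of stack dropped for `argc` arguments under either argc convention.
size_t BytesToDrop(size_t argc, bool argc_includes_receiver) {
  return argc * kSystemPointerSize +
         (argc_includes_receiver ? 0 : kSystemPointerSize);
}

int main() {
  // Three explicit arguments plus receiver: both conventions drop 16 bytes.
  std::printf("%zu\n", BytesToDrop(4, /*argc_includes_receiver=*/true));   // 16
  std::printf("%zu\n", BytesToDrop(3, /*argc_includes_receiver=*/false));  // 16
}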
@@ -1008,7 +1063,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o eax: actual argument count (not including the receiver)
+// o eax: actual argument count
// o edi: the JS function object being called
// o edx: the incoming new target or generator object
// o esi: our context
@@ -1257,7 +1312,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the baseline code into the closure.
__ movd(ecx, xmm2);
- __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ push(edx); // Spill.
__ push(ecx);
@@ -1303,7 +1357,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1321,19 +1375,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Add a stack check before pushing the arguments.
__ StackOverflowCheck(eax, scratch, &stack_overflow, true);
-
__ movd(xmm0, eax); // Spill number of arguments.
// Compute the expected number of arguments.
- __ mov(scratch, eax);
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ lea(scratch, Operand(eax, argc_modification));
+ } else {
+ __ mov(scratch, eax);
+ }
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
- if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
- __ add(scratch, Immediate(1)); // Add one for receiver.
- }
-
// Find the address of the last argument.
__ shl(scratch, kSystemPointerSizeLog2);
__ neg(scratch);
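The rewritten adjustment above folds the receiver handling into a single argc_modification value before computing the push range. A small model of the resulting slot count (a sketch of the intent only, assuming the computed value feeds the copy length; not V8 code):

#include <cassert>

enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

// Number of stack slots the push sequence copies from the argument area.
int ArgsToPush(int argc, bool argc_includes_receiver,
               ConvertReceiverMode receiver_mode) {
  int argc_modification = argc_includes_receiver ? 0 : 1;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The implicit undefined receiver is pushed separately, not copied here.
    argc_modification -= 1;
  }
  return argc + argc_modification;
}

int main() {
  // argc counts the receiver and the receiver is a real value: copy argc slots.
  assert(ArgsToPush(3, true, ConvertReceiverMode::kNotNullOrUndefined) == 3);
  // argc counts the receiver but the receiver is undefined: one slot less.
  assert(ArgsToPush(3, true, ConvertReceiverMode::kNullOrUndefined) == 2);
  // argc does not count the receiver: one extra slot for it.
  assert(ArgsToPush(3, false, ConvertReceiverMode::kNotNullOrUndefined) == 4);
}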
@@ -1385,9 +1442,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
Label* stack_overflow) {
// We have to move return address and the temporary registers above it
// before we can copy arguments onto the stack. To achieve this:
- // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values around it to the top of stack.
- // Step 3: Copy the arguments into the correct locations.
+ // Step 1: Increment the stack pointer by num_args + 1 for receiver (if it
+ //         is not included in argc already).
+ // Step 2: Move the return address and values around it to the top of stack.
+ // Step 3: Copy the arguments into the correct locations.
// current stack =====> required stack layout
// | | | return addr | (2) <-- esp (1)
// | | | addtl. slot |
@@ -1402,8 +1460,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
+ constexpr int receiver_offset =
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize;
__ lea(scratch1,
- Operand(num_args, times_system_pointer_size, kSystemPointerSize));
+ Operand(num_args, times_system_pointer_size, receiver_offset));
__ AllocateStackSpace(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
@@ -1412,7 +1472,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
- (i + 1) * kSystemPointerSize));
+ i * kSystemPointerSize + receiver_offset));
__ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
@@ -1434,7 +1494,11 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_check);
__ inc(scratch1);
__ cmp(scratch1, eax);
- __ j(less_equal, &loop_header, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ j(less, &loop_header, Label::kNear);
+ } else {
+ __ j(less_equal, &loop_header, Label::kNear);
+ }
}
} // anonymous namespace
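The loop-bound change above (less instead of less_equal) keeps the copy loop from counting the receiver among the explicit arguments once argc already includes it. In terms of iteration counts, a sketch of the intent (not the generated loop itself):

#include <cassert>

// Explicit (non-receiver) argument slots written by the construct-args copy.
int ExplicitArgumentSlots(int argc, bool argc_includes_receiver) {
  return argc_includes_receiver ? argc - 1 : argc;
}

int main() {
  assert(ExplicitArgumentSlots(3, true) == 2);   // receiver counted in argc
  assert(ExplicitArgumentSlots(2, false) == 2);  // receiver not counted
}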
@@ -1443,7 +1507,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order
// as they are to be pushed onto the stack.
@@ -1832,7 +1896,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. eax contains the arguments count; the return value
// from LAZY is always the last argument.
__ movd(Operand(esp, eax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
xmm0);
}
__ mov(
@@ -1894,23 +1959,29 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label no_arg_array, no_this_arg;
StackArgumentsAccessor args(eax);
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0, args[0]);
+ __ movd(xmm0, args.GetReceiverOperand());
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ }
{
__ mov(edi, args[1]);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ mov(edx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, edi, ecx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
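The comparisons against JSParameterCount(n) replace the bare immediates because eax may now count the receiver as well. A sketch of what such a helper computes (an assumed definition for illustration; the real constant and helper are defined elsewhere in V8):

constexpr bool kJSArgcIncludesReceiver = true;  // assumed build configuration

constexpr int JSParameterCount(int param_count_without_receiver) {
  return kJSArgcIncludesReceiver ? param_count_without_receiver + 1
                                 : param_count_without_receiver;
}

static_assert(JSParameterCount(0) == 1, "only the receiver on the stack");
static_assert(JSParameterCount(1) == 2, "receiver plus thisArg");

int main() { return 0; }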
@@ -1940,7 +2011,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ Move(eax, 0);
+ __ Move(eax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1954,7 +2025,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// esp[8 * n] : Argument n-1
// esp[8 * (n + 1)] : Argument n
- // eax contains the number of arguments, n, not counting the receiver.
+ // eax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1969,8 +2040,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ inc(eax);
__ bind(&done);
@@ -2004,12 +2080,12 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // thisArgument
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // argumentsList
__ bind(&done);
@@ -2017,9 +2093,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, ecx, edx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2061,13 +2138,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // argumentsList
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // new.target
__ bind(&done);
@@ -2078,7 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -2105,6 +2183,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_esp = pointer_to_new_space_out;
+ Register new_space = scratch1;
+ __ mov(old_esp, esp);
+
+ __ lea(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ if (!kJSArgcIncludesReceiver) {
+ __ inc(argc_in_out);
+ }
+ Register current = scratch1;
+ Register value = scratch2;
+
+ Label loop, entry;
+ __ mov(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(value, Operand(old_esp, current, times_system_pointer_size, 0));
+ __ mov(Operand(esp, current, times_system_pointer_size, 0), value);
+ __ inc(current);
+ __ bind(&entry);
+ __ cmp(current, argc_in_out);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (argc + 1 slot for
+ // the return address).
+ __ lea(
+ pointer_to_new_space_out,
+ Operand(esp, argc_in_out, times_system_pointer_size, kSystemPointerSize));
+ // Update the total number of arguments.
+ if (kJSArgcIncludesReceiver) {
+ __ add(argc_in_out, count);
+ } else {
+ // Also subtract the receiver again.
+ __ lea(argc_in_out, Operand(argc_in_out, count, times_1, -1));
+ }
+}
+
+} // namespace
+
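The new helper above replaces two hand-rolled copy loops further down in this file. A host-side model of what it does to the stack (a sketch only; index 0 stands in for esp and V8 registers are replaced by plain values):

#include <cassert>
#include <string>
#include <vector>

struct ShiftResult {
  std::vector<std::string> stack;  // new stack, index 0 == new esp
  size_t first_free_slot;          // index of the first slot for new args
  int argc;                        // updated argument count
};

ShiftResult AllocateSpaceAndShiftExistingArguments(
    std::vector<std::string> stack, int argc, int count,
    bool argc_includes_receiver) {
  if (!argc_includes_receiver) ++argc;  // temporarily count the receiver
  std::vector<std::string> grown(stack.size() + count, "<free>");
  // Slide the return address (slot 0) plus argc argument slots to the new top.
  for (int i = 0; i <= argc; ++i) grown[i] = stack[i];
  ShiftResult result;
  result.first_free_slot = argc + 1;  // just above return address + arguments
  result.argc = argc_includes_receiver ? argc + count : argc + count - 1;
  result.stack = std::move(grown);
  return result;
}

int main() {
  // Stack top-down: return address, receiver, two arguments (argc == 3 with
  // the receiver included).
  std::vector<std::string> stack = {"ret", "this", "a0", "a1"};
  ShiftResult r = AllocateSpaceAndShiftExistingArguments(stack, 3, 2, true);
  assert(r.stack[0] == "ret" && r.stack[3] == "a1");
  assert(r.first_free_slot == 4);  // the two extra args go at indices 4 and 5
  assert(r.argc == 5);
}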
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
@@ -2112,17 +2243,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- edi : target
// -- esi : context for the Call / Construct builtin
- // -- eax : number of parameters on the stack (not including the receiver)
+ // -- eax : number of parameters on the stack
// -- ecx : len (number of elements to push from args)
- // -- ecx : new.target (checked to be constructor or undefined)
+ // -- edx : new.target (checked to be constructor or undefined)
// -- esp[4] : arguments list (a FixedArray)
// -- esp[0] : return address.
// -----------------------------------
- // We need to preserve eax, edi, esi and ebx.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
- __ movd(xmm2, eax);
+ __ movd(xmm0, edx); // Spill new.target.
+ __ movd(xmm1, edi); // Spill target.
__ movd(xmm3, esi); // Spill the context.
const Register kArgumentsList = esi;
@@ -2157,32 +2286,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
__ movd(xmm4, kArgumentsList); // Spill the arguments list.
-
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = edx, current = edi, tmp = esi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(tmp, Operand(kArgumentsLength, times_system_pointer_size, 0));
- __ AllocateStackSpace(tmp);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ mov(current, Immediate(0));
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(tmp, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), tmp);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(edx, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
+ // kArgumentsLength (ecx): Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // edx: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, kArgumentsLength, eax,
+ edx, edi, esi);
__ movd(kArgumentsList, xmm4); // Recover arguments list.
+ __ movd(xmm2, eax); // Spill argument count.
// Push additional arguments onto the stack.
{
@@ -2207,12 +2319,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
- __ movd(eax, xmm2);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
-
- // Compute the actual parameter count.
- __ add(eax, kArgumentsLength);
+ __ movd(eax, xmm2); // Restore argument count.
+ __ movd(edi, xmm1); // Restore target.
+ __ movd(edx, xmm0); // Restore new.target.
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2227,7 +2336,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object)
// -- esi : context for the Call / Construct builtin
// -- edx : the new target (for [[Construct]] calls)
@@ -2261,12 +2370,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(edx);
+ }
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- eax : the number of arguments already in the stack
// -- ecx : start index (to support rest parameters)
// -- edx : number of arguments to copy, i.e. arguments count - start index
// -- edi : the target to call (can be any Object)
@@ -2284,31 +2395,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = esi, current = edi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(scratch, Operand(edx, times_system_pointer_size, 0));
- __ AllocateStackSpace(scratch);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ Move(current, 0);
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), scratch);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(esi, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
- // Update total number of arguments.
- __ sub(eax, Immediate(2));
- __ add(eax, edx);
+ // edx: Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // esi: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, edx, eax, esi, ebx,
+ edi);
// Point to the first argument to copy (skipping receiver).
__ lea(ecx, Operand(ecx, times_system_pointer_size,
@@ -2350,14 +2441,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(eax);
__ AssertFunction(edi, edx);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
@@ -2376,7 +2465,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2434,7 +2523,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2443,6 +2532,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzx_w(
ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
+
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
@@ -2456,7 +2546,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2471,7 +2561,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- xmm0 : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -- ecx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2539,7 +2629,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
@@ -2561,47 +2651,59 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object).
// -----------------------------------
- StackArgumentsAccessor args(eax);
+ Register argc = eax;
+ Register target = edi;
+ Register map = ecx;
+ Register instance_type = edx;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ StackArgumentsAccessor args(argc);
- Label non_callable, non_function, non_smi, non_jsfunction,
- non_jsboundfunction;
- __ JumpIfSmi(edi, &non_callable);
+ Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
+ non_proxy, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
__ bind(&non_smi);
- __ LoadMap(ecx, edi);
- __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
- __ j(above, &non_jsfunction);
+ __ LoadMap(map, target);
+ __ CmpInstanceTypeRange(map, instance_type, map,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
+ __ j(above, &non_callable_jsfunction);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ bind(&non_jsfunction);
- __ LoadMap(ecx, edi);
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ bind(&non_callable_jsfunction);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET);
// Check if target is a proxy and call CallProxy external builtin
__ bind(&non_jsboundfunction);
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ __ LoadMap(map, target);
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsCallableBit::kMask));
__ j(zero, &non_callable);
// Call CallProxy external builtin
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
+ __ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ bind(&non_proxy);
+ __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ j(equal, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
- __ mov(args.GetReceiverOperand(), edi);
+ __ mov(args.GetReceiverOperand(), target);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(edi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2610,15 +2712,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
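The rewritten Generate_Call keeps the instance type from the range check in a register and adds a dedicated class-constructor bailout. The resulting dispatch order, written out as plain C++ (a schematic, not V8 source; the type names are stand-ins):

#include <cstdio>
#include <stdexcept>
#include <string>

enum class InstanceType {
  CALLABLE_JS_FUNCTION, JS_BOUND_FUNCTION, JS_PROXY, JS_CLASS_CONSTRUCTOR, OTHER
};

struct Target { InstanceType type; bool is_callable; };

std::string DispatchCall(const Target& t) {
  if (t.type == InstanceType::CALLABLE_JS_FUNCTION) return "CallFunction";
  if (t.type == InstanceType::JS_BOUND_FUNCTION) return "CallBoundFunction";
  if (!t.is_callable) throw std::runtime_error("ThrowCalledNonCallable");
  if (t.type == InstanceType::JS_PROXY) return "CallProxy";
  if (t.type == InstanceType::JS_CLASS_CONSTRUCTOR)
    throw std::runtime_error("ThrowConstructorNonCallableError");
  // Any other callable object goes through the call-as-function delegate.
  return "CallFunction via CALL_AS_FUNCTION_DELEGATE";
}

int main() {
  std::puts(DispatchCall({InstanceType::JS_PROXY, true}).c_str());
}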
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2650,7 +2762,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2677,25 +2789,30 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(eax);
+ Register argc = eax;
+ Register target = edi;
+ Register map = ecx;
+ DCHECK(!AreAliased(argc, target, map));
+
+ StackArgumentsAccessor args(argc);
// Check if target is a Smi.
Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
- __ JumpIfSmi(edi, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
+ __ CmpInstanceTypeRange(map, map, map, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ j(above, &non_jsfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
@@ -2704,15 +2821,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Only dispatch to bound functions after checking whether they are
// constructors.
__ bind(&non_jsfunction);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
+ __ CmpInstanceType(map, JS_BOUND_FUNCTION_TYPE);
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET);
// Only dispatch to proxies after checking whether they are constructors.
__ bind(&non_jsboundfunction);
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+ __ CmpInstanceType(map, JS_PROXY_TYPE);
__ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2721,9 +2838,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ mov(args.GetReceiverOperand(), edi);
+ __ mov(args.GetReceiverOperand(), target);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(edi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2768,7 +2886,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@@ -4125,8 +4244,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4139,13 +4257,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, ecx);
+ }
// Load the feedback vector.
Register feedback_vector = ecx;
diff --git a/chromium/v8/src/builtins/ic-callable.tq b/chromium/v8/src/builtins/ic-callable.tq
index dd29e8bf5e2..4e8c9691fa1 100644
--- a/chromium/v8/src/builtins/ic-callable.tq
+++ b/chromium/v8/src/builtins/ic-callable.tq
@@ -21,7 +21,7 @@ macro InSameNativeContext(lhs: Context, rhs: Context): bool {
macro MaybeObjectToStrong(maybeObject: MaybeObject):
HeapObject labels IfCleared {
- assert(IsWeakOrCleared(maybeObject));
+ dcheck(IsWeakOrCleared(maybeObject));
const weakObject = %RawDownCast<Weak<HeapObject>>(maybeObject);
return WeakToStrong(weakObject) otherwise IfCleared;
}
@@ -91,10 +91,10 @@ macro SetCallFeedbackContent(implicit context: Context)(
macro CollectCallFeedback(
maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
const feedbackVector =
@@ -158,7 +158,7 @@ macro CollectCallFeedback(
SetCallFeedbackContent(
feedbackVector, slotId, CallFeedbackContent::kReceiver);
} else {
- assert(!FeedbackValueIsReceiver(feedbackVector, slotId));
+ dcheck(!FeedbackValueIsReceiver(feedbackVector, slotId));
}
TryInitializeAsMonomorphic(recordedFunction, feedbackVector, slotId)
otherwise TransitionToMegamorphic;
@@ -170,10 +170,10 @@ macro CollectCallFeedback(
macro CollectInstanceOfFeedback(
maybeTarget: JSAny, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
const feedbackVector =
@@ -228,10 +228,10 @@ macro CollectConstructFeedback(implicit context: Context)(
updateFeedbackMode: constexpr UpdateFeedbackMode):
never labels ConstructGeneric,
ConstructArray(AllocationSite) {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
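The Torque changes in this and the following files rename assert to dcheck; like C++ DCHECK, these checks are intended to be verified only in debug builds. A self-contained C++ analogue of that behaviour (a sketch under that assumption, not V8's macro definitions):

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK_LIKE(cond)                                 \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "dcheck failed: %s\n", #cond); \
      std::abort();                                       \
    }                                                     \
  } while (0)
#else
#define DCHECK_LIKE(cond) ((void)0)  // compiled out of release builds
#endif

int main() {
  DCHECK_LIKE(2 + 2 == 4);  // verified only when built with -DDEBUG
  return 0;
}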
diff --git a/chromium/v8/src/builtins/ic-dynamic-check-maps.tq b/chromium/v8/src/builtins/ic-dynamic-check-maps.tq
index 691f793b56f..3e194116fd8 100644
--- a/chromium/v8/src/builtins/ic-dynamic-check-maps.tq
+++ b/chromium/v8/src/builtins/ic-dynamic-check-maps.tq
@@ -22,7 +22,7 @@ macro PerformPolymorphicCheck(
const polymorphicArray = UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
const weakActualMap = MakeWeak(actualMap);
const length = polymorphicArray.length_intptr;
- assert(length > 0);
+ dcheck(length > 0);
for (let mapIndex: intptr = 0; mapIndex < length;
mapIndex += FeedbackIteratorEntrySize()) {
@@ -30,7 +30,7 @@ macro PerformPolymorphicCheck(
UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
if (maybeCachedMap == weakActualMap) {
const handlerIndex = mapIndex + FeedbackIteratorHandlerOffset();
- assert(handlerIndex < length);
+ dcheck(handlerIndex < length);
const maybeHandler =
Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
if (TaggedEqual(maybeHandler, actualHandler)) {
@@ -49,7 +49,7 @@ macro PerformMonomorphicCheck(
actualMap: Map, actualHandler: Smi|DataHandler): int32 {
if (TaggedEqual(expectedMap, actualMap)) {
const handlerIndex = slotIndex + 1;
- assert(handlerIndex < feedbackVector.length_intptr);
+ dcheck(handlerIndex < feedbackVector.length_intptr);
const maybeHandler =
Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
if (TaggedEqual(actualHandler, maybeHandler)) {
diff --git a/chromium/v8/src/builtins/ic.tq b/chromium/v8/src/builtins/ic.tq
index a9e92cf63ec..110ed885030 100644
--- a/chromium/v8/src/builtins/ic.tq
+++ b/chromium/v8/src/builtins/ic.tq
@@ -62,7 +62,8 @@ extern macro StoreFeedbackVectorSlot(
constexpr int32): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
-extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
+extern macro ReportFeedbackUpdate(
+ FeedbackVector, uintptr, constexpr string): void;
extern operator '.length_intptr' macro LoadFeedbackVectorLength(FeedbackVector):
intptr;
diff --git a/chromium/v8/src/builtins/internal-coverage.tq b/chromium/v8/src/builtins/internal-coverage.tq
index 07bfc40d8f2..ec5026861e2 100644
--- a/chromium/v8/src/builtins/internal-coverage.tq
+++ b/chromium/v8/src/builtins/internal-coverage.tq
@@ -17,8 +17,8 @@ macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
}
macro IncrementBlockCount(implicit context: Context)(
- coverageInfo: CoverageInfo, slot: Smi) {
- assert(Convert<int32>(slot) < coverageInfo.slot_count);
+ coverageInfo: CoverageInfo, slot: Smi): void {
+ dcheck(Convert<int32>(slot) < coverageInfo.slot_count);
++coverageInfo.slots[slot].block_count;
}
diff --git a/chromium/v8/src/builtins/internal.tq b/chromium/v8/src/builtins/internal.tq
index d0863f13a06..adf513edf4f 100644
--- a/chromium/v8/src/builtins/internal.tq
+++ b/chromium/v8/src/builtins/internal.tq
@@ -19,10 +19,10 @@ builtin GetTemplateObject(
// handler; the current advantage of the split implementation is that the
// bytecode can skip most work if feedback exists.
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
try {
@@ -52,14 +52,14 @@ extern transitioning builtin ForInFilter(implicit context: Context)(
extern enum ForInFeedback extends uint31 { kAny, ...}
extern macro UpdateFeedback(
SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr,
- constexpr UpdateFeedbackMode);
+ constexpr UpdateFeedbackMode): void;
@export
transitioning macro ForInNextSlow(
context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector,
guaranteedFeedback: constexpr UpdateFeedbackMode): JSAny {
- assert(receiver.map != cacheType); // Handled on the fast path.
+ dcheck(receiver.map != cacheType); // Handled on the fast path.
UpdateFeedback(
SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot,
guaranteedFeedback);
diff --git a/chromium/v8/src/builtins/iterator.tq b/chromium/v8/src/builtins/iterator.tq
index c2652e7eb02..0511c0aa690 100644
--- a/chromium/v8/src/builtins/iterator.tq
+++ b/chromium/v8/src/builtins/iterator.tq
@@ -52,10 +52,10 @@ transitioning builtin GetIteratorWithFeedback(
context: Context, receiver: JSAny, loadSlot: TaggedIndex,
callSlot: TaggedIndex,
maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
let iteratorMethod: JSAny;
@@ -117,7 +117,7 @@ transitioning builtin CallIteratorWithFeedback(
// https://tc39.es/ecma262/#sec-iteratorclose
@export
transitioning macro IteratorCloseOnException(implicit context: Context)(
- iterator: IteratorRecord) {
+ iterator: IteratorRecord): void {
try {
// 4. Let innerResult be GetMethod(iterator, "return").
const method = GetProperty(iterator.object, kReturnString);
diff --git a/chromium/v8/src/builtins/loong64/builtins-loong64.cc b/chromium/v8/src/builtins/loong64/builtins-loong64.cc
new file mode 100644
index 00000000000..714353fc96d
--- /dev/null
+++ b/chromium/v8/src/builtins/loong64/builtins-loong64.cc
@@ -0,0 +1,3755 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function, the new target and the actual
+ // argument count.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(kJavaScriptCallArgCountRegister);
+ __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
+
+ __ CallRuntime(function_id, 1);
+ __ LoadCodeObjectEntry(a2, a0);
+ // Restore target function, new target and actual argument count.
+ __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister);
+ __ SmiUntag(kJavaScriptCallArgCountRegister);
+ }
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Jump(a2);
+}
+
+namespace {
+
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0);
+ __ SmiUntag(a0);
+
+ // Set up pointer to last argument (skip receiver).
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t3, t0);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+
+ // Call the function.
+ // a0: number of arguments (untagged)
+ // a1: constructor function
+ // a3: new target
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // Restore context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(t3, t3, kPointerSizeLog2);
+ __ Add_d(sp, sp, t3);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+}
+
+} // namespace
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
+ t3);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(a0, RootIndex::kTheHoleValue);
+
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(a0);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments; we save it in a6
+ // since a0 will store the return value of callRuntime.
+ __ mov(a6, a0);
+
+ // Set up pointer to last argument.
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ Label stack_overflow;
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
+
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t0, t1);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments.
+ __ Push(a6);
+
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // ----------- S t a t e -------------
+ // -- s0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
+
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Add_d(sp, sp, a4);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+
+ __ bind(&check_receiver);
+ __ JumpIfSmi(a0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(a0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_and_return, greater_equal, t2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSBuiltinsConstructStubHelper(masm);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld_d(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- ra : return address
+ // -----------------------------------
+ // Store input value into generator object.
+ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
+ // Check that a1 is still valid, RecordWrite might have clobbered it.
+ __ AssertGeneratorObject(a1);
+
+ // Load suspended function and context.
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, debug_hook);
+ __ Ld_b(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(a5, debug_suspended_generator);
+ __ Ld_d(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+ __ bind(&stepping_prepared);
+
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a4 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(
+ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ld_d(t1, FieldMemOperand(
+ a1, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Sub_d(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ // Push receiver.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(kScratchReg);
+ }
+
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ Label is_baseline;
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
+ }
+
+ // Resume (Ignition/TurboFan) generator object.
+ {
+ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ JumpCodeObject(a2);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a4);
+ // Push hole as receiver since we do not use it for stepping.
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be
+ // overflowed here, which will cause scratch1 to become negative.
+ __ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ slli_d(scratch2, argc, kPointerSizeLog2);
+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
+
+ // Out of stack space.
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+
+ __ bind(&okay);
+}
+
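Generate_CheckStackOverflow above compares the remaining stack space against the argument area with a signed comparison so that an already-overflowed stack (sp below the limit) also fails. The same arithmetic in plain C++ (illustrative values; not V8 code):

#include <cassert>
#include <cstdint>

constexpr int64_t kPointerSize = 8;  // 64-bit target

bool StackHasRoom(int64_t sp, int64_t real_stack_limit, int64_t argc) {
  int64_t space_left = sp - real_stack_limit;  // may already be negative
  int64_t needed = argc * kPointerSize;        // argc pointer-sized slots
  return space_left > needed;                  // signed, like the Branch above
}

int main() {
  assert(StackHasRoom(0x8000, 0x4000, 10));   // plenty of room
  assert(!StackHasRoom(0x4000, 0x8000, 1));   // sp already below the limit
}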
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtin entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ mov(kRootRegister, a0);
+ }
+
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+
+ // We build an EntryFrame.
+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ li(s5, c_entry_fp);
+ __ Ld_d(s4, MemOperand(s5, 0));
+ __ Push(s1, s2, s3, s4);
+
+  // Clear c_entry_fp, now that we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ St_d(zero_reg, MemOperand(s5, 0));
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // either
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a1: microtask_queue
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(s1, js_entry_sp);
+ __ Ld_d(s2, MemOperand(s1, 0));
+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ St_d(fp, MemOperand(s1, 0));
+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ Push(s3);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(s1, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ St_d(a0,
+ MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0.
+ __ LoadRoot(a0, RootIndex::kException);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the jmp(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->code_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // a0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(a5);
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(a5, js_entry_sp);
+ __ St_d(zero_reg, MemOperand(a5, 0));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(a5);
+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(a5, MemOperand(a4, 0));
+
+ // Reset the stack to the callee saved registers.
+ __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtin::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtin::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // ----------- S t a t e -------------
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- a4: argc
+ // -- a5: argv
+ // -----------------------------------
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ li(cp, context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+
+ // Push the function and the receiver onto the stack.
+ __ Push(a2);
+
+ // Check if we have enough stack space to push all arguments.
+ __ addi_d(a6, a4, 1);
+ Generate_CheckStackOverflow(masm, a6, a0, s2);
+
+ // Copy arguments to the stack in a loop.
+ // a4: argc
+ // a5: argv, i.e. points to first arg
+ Label loop, entry;
+ __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7);
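+    // Alsl_d effectively computes s1 = (a4 << kPointerSizeLog2) + a5, i.e.
+    // argv + argc * kPointerSize, so s1 points one past the last argument.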
+ __ b(&entry);
+ // s1 points past last arg.
+ __ bind(&loop);
+ __ addi_d(s1, s1, -kPointerSize);
+ __ Ld_d(s2, MemOperand(s1, 0)); // Read next parameter.
+ __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle.
+ __ Push(s2); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, a5, Operand(s1));
+
+    // Push the receiver.
+ __ Push(a3);
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ __ mov(a3, a1);
+ __ mov(a1, a2);
+ __ mov(a0, a4);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(a4, RootIndex::kUndefinedValue);
+ __ mov(a5, a4);
+ __ mov(s1, a4);
+ __ mov(s2, a4);
+ __ mov(s3, a4);
+ __ mov(s4, a4);
+ __ mov(s5, a4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure) {
+ DCHECK(!AreAliased(optimized_code, closure));
+ // Store code entry in the closure.
+ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld_d(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_w(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld_d(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Add_d(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+  // If the actual size is bigger than the formal size, use it when dropping
+  // the stack arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ Movn(params_size, actual_params_size, t2);
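+  // Movn moves actual_params_size into params_size only when t2 is non-zero,
+  // so the slt/Movn pair computes params_size = max(params_size,
+  // actual_params_size).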
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::INTERPRETED);
+
+ // Drop receiver + arguments.
+ __ Add_d(sp, sp, params_size);
+}
+
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -----------------------------------
+ DCHECK(!AreAliased(optimized_code_entry, a1, a3));
+
+ Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ __ Ld_d(a6, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ Ld_w(a6, FieldMemOperand(a6, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(a6, a6, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&heal_optimized_code_slot, ne, a6, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ Jump(a2);
+
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+}
+
+// Advance the current bytecode offset. This simulates what all bytecode
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* if_return) {
+ Register bytecode_size_table = scratch1;
+
+  // The bytecode offset value will be increased by one in wide and extra wide
+  // cases. In the case of a wide or extra wide JumpLoop bytecode, we will
+  // restore the original bytecode offset, so keep a backup of it to simplify
+  // the code.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
+ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
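+  // The size table holds one byte per bytecode and is laid out as three
+  // consecutive arrays (single-width, wide, extra-wide sizes), which is why
+  // the prefix paths below advance the table pointer by one or two times
+  // kBytecodeCount.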
+
+ // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
+ Label process_bytecode, extra_wide;
+ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
+ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
+ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
+ STATIC_ASSERT(3 ==
+ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(3));
+ __ And(scratch2, bytecode, Operand(1));
+ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
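+  // Prefix bytecodes occupy the values 0..3 (see the static asserts above);
+  // bit 0 distinguishes the extra-wide prefixes (odd) from the wide ones
+  // (even).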
+
+ // Load the next bytecode and update table to the wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
+ __ jmp(&process_bytecode);
+
+ __ bind(&extra_wide);
+ // Load the next bytecode and update table to the extra wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
+
+ __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end);
+
+ __ bind(&not_jump_loop);
+ // Otherwise, load the size of the current bytecode and advance the offset.
+ __ Add_d(scratch2, bytecode_size_table, bytecode);
+ __ Ld_b(scratch2, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
+}
+
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ ASM_CODE_COMMENT(masm);
+ Register scratch = t2;
+ // TODO(liuyu): Remove CHECK
+ CHECK_NE(t2, optimization_state);
+ CHECK_NE(t2, feedback_vector);
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ ASM_CODE_COMMENT(masm);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is available
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
+ }
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Ld_d(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(s1.bit() | s2.bit());
+ temps.Exclude(t7.bit());
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(feedback_vector, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = no_reg;
+ {
+ UseScratchRegisterScope temps(masm);
+ optimization_state = temps.Acquire();
+ // optimization_state will be used only in |has_optimized_code_or_marker|
+ // and outside it can be reused.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ Ld_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(invocation_count, invocation_count, Operand(1));
+ __ St_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(ra, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+ // value ra before the call to this BaselineOutOfLinePrologue builtin.
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+    // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+    // are 8-bit fields next to each other, so we can optimize by writing a
+    // single 16-bit store. These static asserts guard that this assumption
+    // holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_h(zero_reg,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+    // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ UseScratchRegisterScope temps(masm);
+ Register sp_minus_frame_size = temps.Acquire();
+ __ Sub_d(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ }
+
+ // Do "fast" return to the caller pc in ra.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(optimization_state);
+ // Ensure the optimization_state is not allocated again.
+ // Drop the frame created by the baseline call.
+ __ Pop(ra, fp);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+ __ Ret();
+ temps.Exclude(s1.bit() | s2.bit());
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.
+//
+// The live registers are:
+// o a0 : actual argument count (not including the receiver)
+// o a1: the JS function object being called.
+// o a3: the incoming new target or generator object
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frame-constants.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
+ __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ Register optimization_state = a4;
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ Label not_optimized;
+ __ bind(&not_optimized);
+
+ // Increment invocation count for the function.
+ __ Ld_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(a4, a4, Operand(1));
+ __ St_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+  // 8-bit fields next to each other, so we can optimize by writing a single
+  // 16-bit store. These static asserts guard that this assumption holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+ __ Push(kInterpreterBytecodeArrayRegister, a4);
+
+ // Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ __ Sub_d(a5, sp, Operand(a4));
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, a5, Operand(a2));
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ Push(kInterpreterAccumulatorRegister);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Sub_d(a4, a4, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ }
+
+ // If the bytecode array has a valid incoming new target or generator object
+  // register, initialize it with the incoming value which was passed in a3.
+ Label no_incoming_new_target_or_generator_register;
+ __ Ld_w(a5, FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
+ Operand(zero_reg));
+ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
+ __ St_d(a3, MemOperand(a5, 0));
+ __ bind(&no_incoming_new_target_or_generator_register);
+
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
+ // The accumulator is already loaded with undefined.
+
+ // Load the dispatch table into a register and dispatch to the bytecode
+ // handler at the current bytecode offset.
+ Label do_dispatch;
+ __ bind(&do_dispatch);
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+ __ Add_d(t5, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(t5, 0));
+ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
+ kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
+ __ Call(kJavaScriptCallCodeStartRegister);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // Any returns to the entry trampoline are either due to the return bytecode
+ // or the interpreter tail calling a builtin and then a dispatch.
+
+ // Get bytecode array and bytecode offset from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Either return, or advance to the next bytecode and dispatch.
+ Label do_return;
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &do_return);
+ __ jmp(&do_dispatch);
+
+ __ bind(&do_return);
+ // The return value is in a0.
+ LeaveInterpreterFrame(masm, t0, t1);
+ __ Jump(ra);
+
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ St_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
+ __ bind(&has_optimized_code_or_marker);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
+ __ JumpCodeObject(a2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+}
+
+static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
+ Register start_address,
+ Register scratch, Register scratch2) {
+ // Find the address of the last argument.
+ __ Sub_d(scratch, num_args, Operand(1));
+ __ slli_d(scratch, scratch, kPointerSizeLog2);
+ __ Sub_d(start_address, start_address, scratch);
+
+ // Push the arguments.
+ __ PushArray(start_address, num_args, scratch, scratch2,
+ TurboAssembler::PushArrayOrder::kReverse);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenCallImpl(
+ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
+ InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+ Label stack_overflow;
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+ __ Add_d(a3, a0, Operand(1)); // Add one for receiver.
+
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ mov(a3, a0);
+ }
+
+ // This function modifies a2, t0 and a4.
+ GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+    // a2 already points to the penultimate argument; the spread
+    // is below that.
+ __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
+ }
+
+ // Call the target.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- a4 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+ __ addi_d(a6, a0, 1);
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+  // Push the arguments. This function modifies t0, a4 and a5.
+ GenerateInterpreterPushArgs(masm, a0, a4, a5, t0);
+
+ // Push a slot for the receiver.
+ __ Push(zero_reg);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+ // a4 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Label builtin_trampoline, trampoline_loaded;
+ Smi interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld_d(t0,
+ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ld_d(t0, MemOperand(t0, 0));
+
+ __ bind(&trampoline_loaded);
+ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
+
+ // Initialize the dispatch table register.
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
+ // Dispatch to the target bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(a1, 0));
+ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
+ __ Jump(kJavaScriptCallCodeStartRegister);
+}
+
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
+ // Load the current bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+
+ // Advance to the next bytecode.
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &if_return);
+
+ __ bind(&enter_bytecode);
+ // Convert new bytecode offset to a Smi and save in the stackframe.
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
+}
+
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Default());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ if (with_result) {
+ if (java_script_builtin) {
+ __ mov(scratch, a0);
+ } else {
+ // Overwrite the hole inserted by the deoptimizer with the return value
+ // from the LAZY deopt point.
+ __ St_d(
+ a0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+
+ if (with_result && java_script_builtin) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point. t0 contains the arguments count, the return value
+ // from LAZY is always the last argument.
+ __ Add_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7);
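+    // t0 = sp + a0 * kSystemPointerSize, the stack slot of the last argument,
+    // which is where the return value from the LAZY deopt point belongs.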
+ __ St_d(scratch, MemOperand(t0, 0));
+ // Recover arguments count.
+ __ Sub_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ }
+
+ __ Ld_d(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ __ Pop(t0);
+ __ Add_d(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ LoadEntryFromBuiltinIndex(t0);
+ __ Jump(t0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ }
+
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+}
+
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(zero_reg)) {
+ __ Add_d(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the caller.
+ __ Ret(eq, a0, Operand(Smi::zero()));
+
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add_d(a0, a0, a1);
+ Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld_d(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[4] : thisArg
+ // -- sp[8] : argArray
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arg_array = a2;
+ Register receiver = a1;
+ Register this_arg = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
+ __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
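+    // Movz writes undefined_value into its destination only while scratch is
+    // zero, so the missing trailing arguments default to undefined as scratch
+    // counts argc down.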
+ __ Ld_d(receiver, MemOperand(sp, 0));
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_arg, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argArray
+ // -- a1 : receiver
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ DCHECK(receiver == a1);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ { __ Pop(a1); }
+
+ // 2. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ {
+ Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Add_d(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 3. Adjust the actual number of arguments.
+ __ addi_d(a0, a0, -1);
+
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register this_argument = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+ Register scratch = a5;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(new_target, target, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(new_target, target, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, t8, t8);
+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, a4, Operand(zero_reg));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
+ Register args = a2;
+ Register len = a4;
+
+ // Check for stack overflow.
+ Label stack_overflow;
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
+
+  // Move the arguments already on the stack,
+  // including the receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ __ mov(src, sp);
+ __ slli_d(t0, a4, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ __ mov(dest, sp);
+ __ Add_d(t0, a0, Operand(zero_reg));
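+    // t0 = argc; the copy loop below moves argc + 1 slots (the arguments plus
+    // the receiver).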
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t0, t0, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, push, loop;
+ Register src = a6;
+ Register scratch = len;
+
+ __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct().
+ __ Branch(&done, eq, len, Operand(zero_reg));
+ __ slli_d(scratch, len, kPointerSizeLog2);
+ __ Sub_d(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
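+    // Holes in the arguments array are pushed as undefined.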
+ __ bind(&loop);
+ __ Ld_d(a5, MemOperand(src, 0));
+ __ addi_d(src, src, kPointerSize);
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ St_d(a5, MemOperand(a7, 0));
+ __ Add_d(a7, a7, Operand(kSystemPointerSize));
+ __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
+ __ Branch(&loop, ne, scratch, Operand(sp));
+ __ bind(&done);
+ }
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+}
+
+// static
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ CallOrConstructMode mode,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -----------------------------------
+
+ // Check if new.target has a [[Construct]] internal method.
+ if (mode == CallOrConstructMode::kConstruct) {
+ Label new_target_constructor, new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
+ __ bind(&new_target_not_constructor);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ Push(a3);
+ __ CallRuntime(Runtime::kThrowNotConstructor);
+ }
+ __ bind(&new_target_constructor);
+ }
+
+ Label stack_done, stack_overflow;
+ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ Sub_w(a7, a7, a2);
+ __ Branch(&stack_done, le, a7, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+
+ // Point to the first argument to copy (skipping the receiver).
+ __ Add_d(a6, fp,
+ Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
+ kSystemPointerSize));
+ __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2, t7);
+
+    // Move the arguments already on the stack,
+    // including the receiver and the return address.
+ {
+ Label copy;
+ Register src = t0, dest = a2;
+ __ mov(src, sp);
+ // Update stack pointer.
+ __ slli_d(t1, a7, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t1));
+ __ mov(dest, sp);
+ __ Add_d(t2, a0, Operand(zero_reg));
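+      // t2 = argc; the copy loop below moves argc + 1 slots (the arguments
+      // plus the receiver).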
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t2, t2, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t2, Operand(zero_reg));
+ }
+
+ // Copy arguments from the caller frame.
+ // TODO(victorgomes): Consider using forward order as potentially more cache
+ // friendly.
+ {
+ Label loop;
+ __ Add_d(a0, a0, a7);
+ __ bind(&loop);
+ {
+ __ Sub_w(a7, a7, Operand(1));
+ __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(t0, 0));
+ __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
+ __ St_d(kScratchReg, MemOperand(t0, 0));
+ __ Branch(&loop, ne, a7, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_done);
+
+ // Tail-call to the {code} handler.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
+
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
+ {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ LoadReceiver(a3, a0);
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ __ Push(cp);
+ __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
+ __ mov(a3, a0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ }
+ __ StoreReceiver(a3, a0, kScratchReg);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ Ld_hu(
+ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
+
+  // The function is a "classConstructor", so we need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ StoreReceiver(t0, a0, kScratchReg);
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
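+    // Push the bound arguments from last to first, so that bound argument 0
+    // ends up directly above the receiver.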
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
+ t2, Operand(JS_PROXY_TYPE));
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertFunction(a1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
+
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
+ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
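+    // Push the bound arguments from last to first, so that bound argument 0
+    // ends up directly above the receiver.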
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ Ld_d(a3,
+ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target is a Smi.
+ Label non_constructor, non_proxy;
+ __ JumpIfSmi(a1, &non_constructor);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ __ bind(&non_proxy);
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
+ RelocInfo::CODE_TARGET);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+  // Convert it to a Smi for the runtime call.
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+ {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+ // Save all parameter registers (see wasm-linkage.h). They might be
+ // overwritten in the runtime call below. We don't have any callee-saved
+ // registers in wasm, so no need to store anything else.
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+
+ __ MultiPush(gp_regs);
+ __ MultiPushFPU(fp_regs);
+
+    // kFixedFrameSizeFromFp is hard-coded to include space for SIMD
+    // registers, so we still need to allocate extra (unused) space on the
+    // stack as if they were saved.
+ __ Sub_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+
+    // Pass the instance and the function index as explicit arguments to the
+    // runtime function.
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+ __ mov(t8, a0);
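+    // The runtime call returned the entry address in a0; keep it in t8 so it
+    // survives the register restore below.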
+
+ __ Add_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ // Restore registers.
+ __ MultiPopFPU(fp_regs);
+ __ MultiPop(gp_regs);
+ }
+ // Finally, jump to the entrypoint.
+ __ Jump(t8);
+}
+
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ __ Trap();
+}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
+ SaveFPRegsMode save_doubles, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ //
+ // If argv_mode == ArgvMode::kRegister:
+ // a2: pointer to the first argument
+
+ if (argv_mode == ArgvMode::kRegister) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
+ __ Sub_d(s1, s1, kPointerSize);
+ }
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(
+ save_doubles == SaveFPRegsMode::kSave, 0,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ // Prepare arguments for C routine.
+ // a0 = argc
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+ // also need to reserve the 4 argument slots on the stack.
+
+ __ AssertStackIsAligned();
+
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ mov(a1, s1);
+
+ __ StoreReturnAddressAndCall(s2);
+
+ // Result returned in a0 or a1:a0 - do not destroy these registers!
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(a4, RootIndex::kException);
+ __ Branch(&exception_returned, eq, a4, Operand(a0));
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ ExternalReference pending_exception_address = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ li(a2, pending_exception_address);
+ __ Ld_d(a2, MemOperand(a2, 0));
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+    // Cannot use Check() here, as it attempts to generate a call into the
+    // runtime.
+ __ Branch(&okay, eq, a4, Operand(a2));
+ __ stop();
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // a0:a1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ Register argc = argv_mode == ArgvMode::kRegister
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s0: still holds argc (callee-saved).
+ : s0;
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
+ ExternalReference pending_handler_entrypoint_address =
+ ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ExternalReference pending_handler_fp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ExternalReference pending_handler_sp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+
+  // Ask the runtime for help to determine the handler. This will set a0 to
+  // the current pending exception, so don't clobber it.
+ ExternalReference find_handler =
+ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ mov(a0, zero_reg);
+ __ mov(a1, zero_reg);
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
+
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, pending_handler_context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+ __ li(sp, pending_handler_sp_address);
+ __ Ld_d(sp, MemOperand(sp, 0));
+ __ li(fp, pending_handler_fp_address);
+ __ Ld_d(fp, MemOperand(fp, 0));
+
+  // If the handler is a JS frame, restore the context to the frame. Note that
+  // the context (cp) will be 0 for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+
+ // Compute the handler entry address and jump to it.
+ __ li(t7, pending_handler_entrypoint_address);
+ __ Ld_d(t7, MemOperand(t7, 0));
+ __ Jump(t7);
+}
+
+void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Label done;
+ Register result_reg = t0;
+
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+  // Account for the four registers saved below (result_reg, scratch, scratch2
+  // and scratch3).
+ const int kArgumentOffset = 4 * kPointerSize;
+
+ __ Push(result_reg);
+ __ Push(scratch, scratch2, scratch3);
+
+ // Load double input.
+ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
+
+ // Try a conversion to a signed integer.
+ __ ftintrz_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ movfr2gr_s(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ movfcsr2gr(scratch);
+
+ // Check for overflow and NaNs.
+ __ And(scratch, scratch,
+ kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+
+ __ Ld_w(input_low,
+ MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
+ __ Ld_w(input_high,
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
+
+ Label normal_exponent;
+ // Extract the biased exponent in result.
+ __ bstrpick_w(result_reg, input_high,
+ HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
+ HeapNumber::kExponentShift);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Sub_w(result_reg, result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Add_w(scratch, result_reg,
+ Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
+ // need to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high, input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ __ sll_w(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(kScratchReg, 32);
+ __ sub_w(scratch, kScratchReg, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Sub_w(scratch, zero_reg, scratch);
+ __ sll_w(input_low, input_low, scratch);
+ __ Branch(&shift_done);
+
+ __ bind(&pos_shift);
+ __ srl_w(input_low, input_low, scratch);
+
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Sub_w(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
+
+ __ bind(&done);
+
+ __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
+ __ Pop(scratch, scratch2, scratch3);
+ __ Pop(result_reg);
+ __ Ret();
+}
+
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle and propagates exceptions. Restores the context.
+// stack_space - space to be unwound on exit (includes the JS call arguments'
+// space and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_enabled, end_profiler_check;
+ __ li(t7, ExternalReference::is_profiling_address(isolate));
+ __ Ld_b(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ __ li(t7, ExternalReference::address_of_runtime_stats_flag());
+ __ Ld_w(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t7, function_address);
+ __ Branch(&end_profiler_check);
+ }
+
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t7, thunk_ref);
+ }
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s5, next_address);
+ __ Ld_d(s0, MemOperand(s5, kNextOffset));
+ __ Ld_d(s1, MemOperand(s5, kLimitOffset));
+ __ Ld_w(s2, MemOperand(s5, kLevelOffset));
+ __ Add_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
+
+ __ StoreReturnAddressAndCall(t7);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ld_d(a0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ St_d(s0, MemOperand(s5, kNextOffset));
+ if (FLAG_debug_code) {
+ __ Ld_w(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Sub_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
+ __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s0, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ Ld_d(s0, *stack_space_operand);
+ }
+
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ Ld_d(a5, MemOperand(kScratchReg, 0));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ St_d(s1, MemOperand(s5, kLimitOffset));
+ __ mov(s0, a0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(a0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a1 : api function address
+ // -- a2 : arguments count (not including the receiver)
+ // -- a3 : call data
+ // -- a0 : holder
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
+ // -- ...
+ // -- sp[(argc) * 8] : last argument
+ // -----------------------------------
+
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register call_data = a3;
+ Register holder = a0;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
+
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
+ base));
+
+ using FCA = FunctionCallbackArguments;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
+
+ // Reserve space on the stack.
+ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue and kReturnValue.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
+
+ // kData.
+ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // EnterExitFrame may align the sp.
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Add_d(scratch, scratch,
+ Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
+
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+  // Stored as an int field; 32-bit integers within a struct on the stack are
+  // always left-justified by the n64 ABI.
+ __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
+
+  // We also store the number of stack slots to drop after returning from the
+  // API function here.
+  // Note: Unlike on other architectures, this stores the number of slots to
+  // drop, not the number of bytes.
+ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
+ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Add_d(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame to make the GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ using PCA = PropertyCallbackArguments;
+ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch,
+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::zero().ptr());
+ __ St_d(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+  // Create the v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Add_d(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld_d(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+  // +3 is to skip the prologue, the return address and the name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+ __ St_d(ra, MemOperand(sp, 0)); // Store the return address.
+ __ Call(t7); // Call the C++ function.
+ __ Ld_d(ra, MemOperand(sp, 0)); // Return to calling code.
+
+ // TODO(LOONG_dev): LOONG64 Check this assert.
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ Ld_d(a4, MemOperand(ra, 0));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+
+ __ Jump(ra);
+}
+
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Fst_d(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ St_d(fp, MemOperand(a2, 0));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ mov(a3, ra);
+ __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ sub_d(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register a0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
+ __ St_d(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, Operand(kDebugZapValue));
+ __ St_d(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Fld_d(f0, MemOperand(sp, src_offset));
+ __ Fst_d(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ add_d(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ Pop(a4);
+ __ St_d(a4, MemOperand(a3, 0));
+ __ addi_d(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ Push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ Pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
+ __ Branch(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld_d(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ Branch(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
+ __ Add_d(a6, a2, Operand(a3));
+ __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ Push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Add_d(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ Push(a6);
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ Push(a6);
+
+  // Technically restoring 't7' should work unless zero_reg is also restored,
+  // but it's safer to check for this.
+ DCHECK(!(t7.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(t7, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld_d(ToRegister(i), MemOperand(t7, offset));
+ }
+ }
+
+ __ Pop(t7); // Get continuation, leave pc on stack.
+ __ Pop(ra);
+ __ Jump(t7);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
+namespace {
+
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld_d(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t2, t2);
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t2, t2);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+ }
+
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
+
+ // Replace BytecodeOffset with the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ GetObjectType(feedback_vector, t2, t2);
+ __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ St_d(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Sub_d(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Add_d(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ // TODO(liuyu): Remove this Ld, as on arm64, after register reallocation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ St_h(zero_reg,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start address of
+ // the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
+}
+
+void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
+}
+
+void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
+ MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<
+ DynamicCheckMapsWithFeedbackVectorDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
+}
+
+template <class Descriptor>
+void Builtins::Generate_DynamicCheckMapsTrampoline(
+ MacroAssembler* masm, Handle<Code> builtin_target) {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Only save the registers that the DynamicCheckMaps builtin can clobber.
+ Descriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+ // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
+ // need to save all CallerSaved registers too.
+ if (FLAG_debug_code) registers |= kJSCallerSaved;
+ __ MaybeSaveRegisters(registers);
+
+ // Load the immediate arguments from the deopt exit to pass to the builtin.
+ Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
+ Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
+ __ Ld_d(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ Ld_d(
+ slot_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
+ __ Ld_d(
+ handler_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
+ __ Call(builtin_target, RelocInfo::CODE_TARGET);
+
+ Label deopt, bailout;
+ __ Branch(&deopt, ne, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)));
+
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ Ret();
+
+ __ bind(&deopt);
+ __ Branch(&bailout, eq, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
+
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
+ }
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
+ __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
+
+ __ bind(&bailout);
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
+ __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
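The new LOONG64 port above follows the same shape as the other architectures: Generate_BaselineOrInterpreterEntry enters Sparkplug (baseline) code when the SharedFunctionInfo's function data is a Code object, and otherwise re-enters the interpreter at the current or next bytecode. A minimal sketch of that decision in plain C++ (all names are illustrative, not V8 API):

// Minimal sketch, not V8 code: the dispatch performed by
// Generate_BaselineOrInterpreterEntry above. Names are illustrative only.
#include <cstdio>

enum class FunctionData { kBytecodeArray, kBaselineCode };

const char* ChooseEntry(FunctionData data, bool next_bytecode, bool is_osr) {
  // For OSR entry the builtin assumes baseline code is always present.
  if (!is_osr && data != FunctionData::kBaselineCode) {
    return next_bytecode ? "InterpreterEnterAtNextBytecode"
                         : "InterpreterEnterAtBytecode";
  }
  // Otherwise the baseline pc is computed from the bytecode offset and
  // execution continues in baseline code (or at the OSR entry).
  return "baseline entry";
}

int main() {
  std::printf("%s\n", ChooseEntry(FunctionData::kBytecodeArray, false, false));
  return 0;
}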
diff --git a/chromium/v8/src/builtins/math.tq b/chromium/v8/src/builtins/math.tq
index fbcf35fedc2..b3d2aafb56f 100644
--- a/chromium/v8/src/builtins/math.tq
+++ b/chromium/v8/src/builtins/math.tq
@@ -416,7 +416,7 @@ MathHypot(
} else if (max == 0) {
return 0;
}
- assert(max > 0);
+ dcheck(max > 0);
// Kahan summation to avoid rounding errors.
// Normalize the numbers to the largest one to avoid overflow.
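The assert() to dcheck() rename seen in this and the other Torque files below brings Torque in line with the C++ DCHECK convention, where the condition is only checked in debug builds. A minimal C++ analogy, assuming dcheck is debug-only like DCHECK (standard <cassert>, not the V8 macro):

// Minimal analogy, not V8 code: a debug-only invariant that disappears in
// release (NDEBUG) builds, mirroring dcheck(max > 0) in MathHypot above.
#include <cassert>

double LargestMagnitude(double max) {
  assert(max > 0);  // stand-in for dcheck(max > 0)
  return max;
}

int main() { return LargestMagnitude(2.0) > 0 ? 0 : 1; }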
diff --git a/chromium/v8/src/builtins/mips/builtins-mips.cc b/chromium/v8/src/builtins/mips/builtins-mips.cc
index 8f4bf4d06bd..9a97f0fa4e6 100644
--- a/chromium/v8/src/builtins/mips/builtins-mips.cc
+++ b/chromium/v8/src/builtins/mips/builtins-mips.cc
@@ -612,6 +612,16 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -620,7 +630,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ lw(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -1389,8 +1407,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Lw(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
__ JumpCodeObject(a2);
@@ -1779,7 +1796,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2723,12 +2741,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3964,7 +3976,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t6, t6);
- __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3977,12 +3989,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t6, t6);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t6,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
diff --git a/chromium/v8/src/builtins/mips64/builtins-mips64.cc b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
index 45e1c32f82f..3f8824d97d3 100644
--- a/chromium/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -309,11 +319,18 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
-
__ bind(&done);
}
@@ -1402,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
__ JumpCodeObject(a2);
@@ -1788,7 +1804,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2814,12 +2831,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3549,7 +3560,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2);
- __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3562,12 +3573,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t2, t2);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
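In the MIPS ports (and the other architectures in this patch) the BaselineData wrapper is gone: the SharedFunctionInfo's function data now holds the baseline Code object directly, so the type check compares against CODET_TYPE and debug builds additionally assert on the code kind via AssertCodeIsBaseline. A hedged sketch of that kind check in plain C++ (the flag layout and enum values are assumptions, not the real object layout):

// Minimal sketch, not V8 code: AssertCodeIsBaseline above decodes the kind
// from the Code object's flags. The field layout here is an assumption.
#include <cassert>
#include <cstdint>

enum class CodeKind : uint32_t { INTERPRETED = 0, BASELINE = 1, TURBOFAN = 2 };

struct Code {
  uint32_t flags;  // kind assumed to live in the low bits for this sketch
};

CodeKind DecodeKind(const Code& code) {
  return static_cast<CodeKind>(code.flags & 0x3);  // stand-in for KindField
}

void AssertCodeIsBaseline(const Code& code) {
  assert(DecodeKind(code) == CodeKind::BASELINE);
}

int main() {
  Code baseline{static_cast<uint32_t>(CodeKind::BASELINE)};
  AssertCodeIsBaseline(baseline);
  return 0;
}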
diff --git a/chromium/v8/src/builtins/number.tq b/chromium/v8/src/builtins/number.tq
index f4bd4cc578d..777dd210d6e 100644
--- a/chromium/v8/src/builtins/number.tq
+++ b/chromium/v8/src/builtins/number.tq
@@ -62,7 +62,7 @@ transitioning macro ThisNumberValue(implicit context: Context)(
}
macro ToCharCode(input: int32): char8 {
- assert(0 <= input && input < 36);
+ dcheck(0 <= input && input < 36);
return input < 10 ?
%RawDownCast<char8>(Unsigned(input + kAsciiZero)) :
%RawDownCast<char8>(Unsigned(input - 10 + kAsciiLowerCaseA));
@@ -78,7 +78,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
return StringFromSingleCharCode(ToCharCode(n));
}
} else {
- assert(isNegative);
+ dcheck(isNegative);
if (n == kMinInt32) {
goto Slow;
}
@@ -92,7 +92,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
temp = temp / radix;
length = length + 1;
}
- assert(length > 0);
+ dcheck(length > 0);
const strSeq = AllocateNonEmptySeqOneByteString(Unsigned(length));
let cursor: intptr = Convert<intptr>(length) - 1;
while (n > 0) {
@@ -102,15 +102,15 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
cursor = cursor - 1;
}
if (isNegative) {
- assert(cursor == 0);
+ dcheck(cursor == 0);
// Insert '-' to result.
*UnsafeConstCast(&strSeq.chars[0]) = 45;
} else {
- assert(cursor == -1);
+ dcheck(cursor == -1);
// In sync with Factory::SmiToString: If radix = 10 and positive number,
// update hash for string.
if (radix == 10) {
- assert(strSeq.raw_hash_field == kNameEmptyHashField);
+ dcheck(strSeq.raw_hash_field == kNameEmptyHashField);
strSeq.raw_hash_field = MakeArrayIndexHash(Unsigned(x), Unsigned(length));
}
}
diff --git a/chromium/v8/src/builtins/object-fromentries.tq b/chromium/v8/src/builtins/object-fromentries.tq
index 81a0859d29b..34ab73148f0 100644
--- a/chromium/v8/src/builtins/object-fromentries.tq
+++ b/chromium/v8/src/builtins/object-fromentries.tq
@@ -58,7 +58,7 @@ ObjectFromEntries(
const fastIteratorResultMap: Map = GetIteratorResultMap();
let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
try {
- assert(!IsNullOrUndefined(i.object));
+ dcheck(!IsNullOrUndefined(i.object));
while (true) {
const step: JSReceiver =
iterator::IteratorStep(i, fastIteratorResultMap)
diff --git a/chromium/v8/src/builtins/ppc/builtins-ppc.cc b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
index 02b76175ec1..4087a158c76 100644
--- a/chromium/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
@@ -1641,7 +1641,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset), r0);
+ r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
+ r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -2046,8 +2047,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(r4);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
@@ -2062,6 +2061,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ andi(r0, r6,
Operand(SharedFunctionInfo::IsStrictBit::kMask |
SharedFunctionInfo::IsNativeBit::kMask));
@@ -2243,34 +2243,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r4, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r7, r4);
- __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r3;
+ Register target = r4;
+ Register map = r7;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, le);
- __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::Bits1::IsCallableBit::kShift, r0);
- __ beq(&non_callable, cr0);
+ {
+ Register flags = r7;
+ __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
+ __ beq(&non_callable, cr0);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ cmpi(r8, Operand(JS_PROXY_TYPE));
+ __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ beq(&class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
 // Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r4, r3, r8);
+ __ StoreReceiver(target, argc, r8);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r4, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2279,9 +2293,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
}
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
+ }
}
// static
@@ -2351,32 +2374,41 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r3;
+ Register target = r4;
+ Register map = r7;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r4, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(r7, FieldMemOperand(r4, HeapObject::kMapOffset),
- r0);
- __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r5, Map::Bits1::IsConstructorBit::kShift, r0);
- __ beq(&non_constructor, cr0);
+ __ LoadTaggedPointerField(
+ map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+ {
+ Register flags = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift, r0);
+ __ beq(&non_constructor, cr0);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ cmpi(r8, Operand(JS_PROXY_TYPE));
+ __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ bne(&non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2385,9 +2417,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r4, r3, r8);
+ __ StoreReceiver(target, argc, r8);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r4, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2646,12 +2679,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
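The PPC Generate_Call/Generate_Construct rewrite above names the registers it uses and adds an explicit class-constructor path: a callable that turns out to be a class constructor now reaches a dedicated label that calls Runtime::kThrowConstructorNonCallableError instead of falling through the generic non-callable path. A rough sketch of the resulting dispatch order, in plain C++ with illustrative names:

// Minimal sketch, not V8 code: the dispatch order implemented by
// Generate_Call above after this patch. Names are illustrative only.
#include <stdexcept>
#include <string>

enum class InstanceType {
  kCallableJSFunction, kBoundFunction, kProxy, kClassConstructor, kOther
};

std::string DispatchCall(InstanceType type, bool is_callable) {
  if (type == InstanceType::kCallableJSFunction) return "CallFunction";
  if (type == InstanceType::kBoundFunction) return "CallBoundFunction";
  if (!is_callable) throw std::runtime_error("ThrowCalledNonCallable");
  if (type == InstanceType::kProxy) return "CallProxy";
  // New in this patch: class constructors get their own error rather than
  // the generic "called non-callable" one.
  if (type == InstanceType::kClassConstructor)
    throw std::runtime_error("ThrowConstructorNonCallableError");
  return "CallFunction via call_as_function_delegate";
}

int main() { return DispatchCall(InstanceType::kProxy, true).empty() ? 1 : 0; }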
diff --git a/chromium/v8/src/builtins/promise-abstract-operations.tq b/chromium/v8/src/builtins/promise-abstract-operations.tq
index 0e435afad9b..5c871d3ff0e 100644
--- a/chromium/v8/src/builtins/promise-abstract-operations.tq
+++ b/chromium/v8/src/builtins/promise-abstract-operations.tq
@@ -194,7 +194,7 @@ transitioning builtin
FulfillPromise(implicit context: Context)(
promise: JSPromise, value: JSAny): Undefined {
// Assert: The value of promise.[[PromiseState]] is "pending".
- assert(promise.Status() == PromiseState::kPending);
+ dcheck(promise.Status() == PromiseState::kPending);
RunContextPromiseHookResolve(promise);
@@ -469,7 +469,7 @@ transitioning macro PerformPromiseThenImpl(implicit context: Context)(
resultPromiseOrCapability);
} else
deferred {
- assert(promise.Status() == PromiseState::kRejected);
+ dcheck(promise.Status() == PromiseState::kRejected);
handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
microtask = NewPromiseRejectReactionJobTask(
handlerContext, reactionsOrResult, onRejected,
diff --git a/chromium/v8/src/builtins/promise-all-element-closure.tq b/chromium/v8/src/builtins/promise-all-element-closure.tq
index 16e91dae06b..24b9cfb346e 100644
--- a/chromium/v8/src/builtins/promise-all-element-closure.tq
+++ b/chromium/v8/src/builtins/promise-all-element-closure.tq
@@ -103,7 +103,7 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
}
}
- assert(
+ dcheck(
promiseContext.length ==
SmiTag(PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementLength));
@@ -111,10 +111,10 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
function.context = nativeContext;
// Determine the index from the {function}.
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
const identityHash =
LoadJSReceiverIdentityHash(function) otherwise unreachable;
- assert(identityHash > 0);
+ dcheck(identityHash > 0);
const index = identityHash - 1;
let remainingElementsCount = *ContextSlot(
diff --git a/chromium/v8/src/builtins/promise-all.tq b/chromium/v8/src/builtins/promise-all.tq
index 5ab64a167d3..602908d7f66 100644
--- a/chromium/v8/src/builtins/promise-all.tq
+++ b/chromium/v8/src/builtins/promise-all.tq
@@ -44,15 +44,15 @@ macro CreatePromiseAllResolveElementFunction(implicit context: Context)(
resolveElementContext: PromiseAllResolveElementContext, index: Smi,
nativeContext: NativeContext,
resolveFunction: SharedFunctionInfo): JSFunction {
- assert(index > 0);
- assert(index < kPropertyArrayHashFieldMax);
+ dcheck(index > 0);
+ dcheck(index < kPropertyArrayHashFieldMax);
const map = *ContextSlot(
nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
const resolve = AllocateFunctionWithMapAndContext(
map, resolveFunction, resolveElementContext);
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
resolve.properties_or_hash = index;
return resolve;
}
@@ -332,7 +332,7 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
const capability = NewPromiseCapability(receiver, False);
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
try {
diff --git a/chromium/v8/src/builtins/promise-any.tq b/chromium/v8/src/builtins/promise-any.tq
index d86e265d6c2..1555511eda3 100644
--- a/chromium/v8/src/builtins/promise-any.tq
+++ b/chromium/v8/src/builtins/promise-any.tq
@@ -57,14 +57,14 @@ transitioning macro CreatePromiseAnyRejectElementContext(
macro CreatePromiseAnyRejectElementFunction(implicit context: Context)(
rejectElementContext: PromiseAnyRejectElementContext, index: Smi,
nativeContext: NativeContext): JSFunction {
- assert(index > 0);
- assert(index < kPropertyArrayHashFieldMax);
+ dcheck(index > 0);
+ dcheck(index < kPropertyArrayHashFieldMax);
const map = *ContextSlot(
nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
const rejectInfo = PromiseAnyRejectElementSharedFunConstant();
const reject =
AllocateFunctionWithMapAndContext(map, rejectInfo, rejectElementContext);
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
reject.properties_or_hash = index;
return reject;
}
@@ -89,7 +89,7 @@ PromiseAnyRejectElementClosure(
return Undefined;
}
- assert(
+ dcheck(
context.length ==
SmiTag(
PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength));
@@ -100,9 +100,9 @@ PromiseAnyRejectElementClosure(
target.context = nativeContext;
// 5. Let index be F.[[Index]].
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
const identityHash = LoadJSReceiverIdentityHash(target) otherwise unreachable;
- assert(identityHash > 0);
+ dcheck(identityHash > 0);
const index = identityHash - 1;
// 6. Let errors be F.[[Errors]].
@@ -328,7 +328,7 @@ PromiseAny(
const capability = NewPromiseCapability(receiver, False);
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
try {
@@ -365,7 +365,7 @@ PromiseAny(
goto Reject(e);
} label Reject(e: Object) deferred {
// Exception must be bound to a JS value.
- assert(e != TheHole);
+ dcheck(e != TheHole);
Call(
context, UnsafeCast<Callable>(capability.reject), Undefined,
UnsafeCast<JSAny>(e));
diff --git a/chromium/v8/src/builtins/promise-finally.tq b/chromium/v8/src/builtins/promise-finally.tq
index f5764868508..ff979f97320 100644
--- a/chromium/v8/src/builtins/promise-finally.tq
+++ b/chromium/v8/src/builtins/promise-finally.tq
@@ -70,7 +70,7 @@ PromiseCatchFinally(
*ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 6. Let promise be ? PromiseResolve(C, result).
const promise = PromiseResolve(constructor, result);
@@ -117,7 +117,7 @@ PromiseThenFinally(
*ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 6. Let promise be ? PromiseResolve(C, result).
const promise = PromiseResolve(constructor, result);
@@ -185,7 +185,7 @@ PromisePrototypeFinally(
}
// 4. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 5. If IsCallable(onFinally) is not true,
// a. Let thenFinally be onFinally.
diff --git a/chromium/v8/src/builtins/promise-misc.tq b/chromium/v8/src/builtins/promise-misc.tq
index 58a4ad3c0d5..e8b4842dd5e 100644
--- a/chromium/v8/src/builtins/promise-misc.tq
+++ b/chromium/v8/src/builtins/promise-misc.tq
@@ -49,7 +49,7 @@ macro PromiseInit(promise: JSPromise): void {
macro InnerNewJSPromise(implicit context: Context)(): JSPromise {
const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
- assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
+ dcheck(IsFunctionWithPrototypeSlotMap(promiseFun.map));
const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
const promiseHeapObject = promise_internal::AllocateJSPromise(context);
*UnsafeConstCast(&promiseHeapObject.map) = promiseMap;
@@ -103,7 +103,7 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
+ promise: JSPromise, parent: Object): void {
const maybeHook = *NativeContextSlot(
ContextSlot::PROMISE_HOOK_INIT_FUNCTION_INDEX);
const hook = Cast<Callable>(maybeHook) otherwise return;
@@ -119,7 +119,7 @@ transitioning macro RunContextPromiseHookInit(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise) {
+ promise: JSPromise): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
PromiseHookFlags());
@@ -127,14 +127,14 @@ transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise, flags: uint32) {
+ promise: JSPromise, flags: uint32): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise, flags);
}
@export
transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
PromiseHookFlags());
@@ -142,7 +142,8 @@ transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32):
+ void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
flags);
@@ -150,7 +151,7 @@ transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
PromiseHookFlags());
@@ -158,7 +159,8 @@ transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32):
+ void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
flags);
@@ -166,7 +168,8 @@ transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
transitioning macro RunContextPromiseHook(implicit context: Context)(
slot: Slot<NativeContext, Undefined|Callable>,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined,
+ flags: uint32): void {
if (!IsContextPromiseHookEnabled(flags)) return;
const maybeHook = *NativeContextSlot(slot);
const hook = Cast<Callable>(maybeHook) otherwise return;
@@ -192,7 +195,7 @@ transitioning macro RunContextPromiseHook(implicit context: Context)(
}
transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
+ promise: JSPromise, parent: Object): void {
const promiseHookFlags = PromiseHookFlags();
// Fast return if no hooks are set.
if (promiseHookFlags == 0) return;
@@ -230,7 +233,7 @@ transitioning macro NewJSPromise(implicit context: Context)(): JSPromise {
@export
transitioning macro NewJSPromise(implicit context: Context)(
status: constexpr PromiseState, result: JSAny): JSPromise {
- assert(status != PromiseState::kPending);
+ dcheck(status != PromiseState::kPending);
const instance = InnerNewJSPromise();
instance.reactions_or_result = result;
diff --git a/chromium/v8/src/builtins/promise-race.tq b/chromium/v8/src/builtins/promise-race.tq
index 973ddd8bacb..eed1fae3890 100644
--- a/chromium/v8/src/builtins/promise-race.tq
+++ b/chromium/v8/src/builtins/promise-race.tq
@@ -27,7 +27,7 @@ PromiseRace(
const promise = capability.promise;
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
// For catch prediction, don't treat the .then calls as handling it;
diff --git a/chromium/v8/src/builtins/promise-resolve.tq b/chromium/v8/src/builtins/promise-resolve.tq
index fa3d19411fc..5b0a82ca3d1 100644
--- a/chromium/v8/src/builtins/promise-resolve.tq
+++ b/chromium/v8/src/builtins/promise-resolve.tq
@@ -138,8 +138,8 @@ ResolvePromise(implicit context: Context)(
// ensures that the intrinsic %ObjectPrototype% doesn't contain any
// "then" property. This helps to avoid negative lookups on iterator
// results from async generators.
- assert(IsJSReceiverMap(resolutionMap));
- assert(!IsPromiseThenProtectorCellInvalid());
+ dcheck(IsJSReceiverMap(resolutionMap));
+ dcheck(!IsPromiseThenProtectorCellInvalid());
if (resolutionMap ==
*NativeContextSlot(
nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
diff --git a/chromium/v8/src/builtins/proxy-delete-property.tq b/chromium/v8/src/builtins/proxy-delete-property.tq
index a5925c2f7dc..330cf8e0cdf 100644
--- a/chromium/v8/src/builtins/proxy-delete-property.tq
+++ b/chromium/v8/src/builtins/proxy-delete-property.tq
@@ -15,15 +15,15 @@ ProxyDeleteProperty(implicit context: Context)(
// Handle deeply nested proxy.
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
try {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/chromium/v8/src/builtins/proxy-get-property.tq b/chromium/v8/src/builtins/proxy-get-property.tq
index 563b38be371..0471cf318ac 100644
--- a/chromium/v8/src/builtins/proxy-get-property.tq
+++ b/chromium/v8/src/builtins/proxy-get-property.tq
@@ -17,9 +17,9 @@ ProxyGetProperty(implicit context: Context)(
onNonExistent: Smi): JSAny {
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
diff --git a/chromium/v8/src/builtins/proxy-get-prototype-of.tq b/chromium/v8/src/builtins/proxy-get-prototype-of.tq
index 152489ecb6a..ad22ab29868 100644
--- a/chromium/v8/src/builtins/proxy-get-prototype-of.tq
+++ b/chromium/v8/src/builtins/proxy-get-prototype-of.tq
@@ -16,7 +16,7 @@ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -40,7 +40,7 @@ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
// 9. Let extensibleTarget be ? IsExtensible(target).
// 10. If extensibleTarget is true, return handlerProto.
const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
return handlerProto;
}
diff --git a/chromium/v8/src/builtins/proxy-has-property.tq b/chromium/v8/src/builtins/proxy-has-property.tq
index fc81d5dcc9d..75ac60d03cc 100644
--- a/chromium/v8/src/builtins/proxy-has-property.tq
+++ b/chromium/v8/src/builtins/proxy-has-property.tq
@@ -10,19 +10,19 @@ namespace proxy {
// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
transitioning builtin ProxyHasProperty(implicit context: Context)(
proxy: JSProxy, name: PropertyKey): JSAny {
- assert(Is<JSProxy>(proxy));
+ dcheck(Is<JSProxy>(proxy));
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
try {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/chromium/v8/src/builtins/proxy-is-extensible.tq b/chromium/v8/src/builtins/proxy-is-extensible.tq
index a7c2c56d441..58f147c2964 100644
--- a/chromium/v8/src/builtins/proxy-is-extensible.tq
+++ b/chromium/v8/src/builtins/proxy-is-extensible.tq
@@ -16,7 +16,7 @@ transitioning builtin ProxyIsExtensible(implicit context: Context)(
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/chromium/v8/src/builtins/proxy-prevent-extensions.tq b/chromium/v8/src/builtins/proxy-prevent-extensions.tq
index a5a3d93da44..9f7a226b3ab 100644
--- a/chromium/v8/src/builtins/proxy-prevent-extensions.tq
+++ b/chromium/v8/src/builtins/proxy-prevent-extensions.tq
@@ -17,7 +17,7 @@ ProxyPreventExtensions(implicit context: Context)(
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -38,7 +38,7 @@ ProxyPreventExtensions(implicit context: Context)(
// 8.b If extensibleTarget is true, throw a TypeError exception.
if (ToBoolean(trapResult)) {
const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
ThrowTypeError(MessageTemplate::kProxyPreventExtensionsExtensible);
}
diff --git a/chromium/v8/src/builtins/proxy-revoke.tq b/chromium/v8/src/builtins/proxy-revoke.tq
index d031bb9f1d9..0c6c9dbb258 100644
--- a/chromium/v8/src/builtins/proxy-revoke.tq
+++ b/chromium/v8/src/builtins/proxy-revoke.tq
@@ -26,7 +26,7 @@ ProxyRevoke(js-implicit context: Context)(): Undefined {
*proxySlot = Null;
// 4. Assert: p is a Proxy object.
- assert(Is<JSProxy>(proxy));
+ dcheck(Is<JSProxy>(proxy));
// 5. Set p.[[ProxyTarget]] to null.
proxy.target = Null;
diff --git a/chromium/v8/src/builtins/proxy-set-property.tq b/chromium/v8/src/builtins/proxy-set-property.tq
index 441a5d418d0..8a7dfde9e5b 100644
--- a/chromium/v8/src/builtins/proxy-set-property.tq
+++ b/chromium/v8/src/builtins/proxy-set-property.tq
@@ -11,7 +11,7 @@ SetPropertyWithReceiver(implicit context: Context)(
Object, Name, Object, Object): void;
transitioning macro CallThrowTypeErrorIfStrict(implicit context: Context)(
- message: constexpr MessageTemplate) {
+ message: constexpr MessageTemplate): void {
ThrowTypeErrorIfStrict(SmiConstant(message), Null, Null);
}
@@ -22,8 +22,8 @@ ProxySetProperty(implicit context: Context)(
proxy: JSProxy, name: PropertyKey|PrivateSymbol, value: JSAny,
receiverValue: JSAny): JSAny {
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
let key: PropertyKey;
typeswitch (name) {
@@ -40,7 +40,7 @@ ProxySetProperty(implicit context: Context)(
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/chromium/v8/src/builtins/proxy-set-prototype-of.tq b/chromium/v8/src/builtins/proxy-set-prototype-of.tq
index ec68cef44c8..57ceb277844 100644
--- a/chromium/v8/src/builtins/proxy-set-prototype-of.tq
+++ b/chromium/v8/src/builtins/proxy-set-prototype-of.tq
@@ -15,12 +15,12 @@ ProxySetPrototypeOf(implicit context: Context)(
const kTrapName: constexpr string = 'setPrototypeOf';
try {
// 1. Assert: Either Type(V) is Object or Type(V) is Null.
- assert(proto == Null || Is<JSReceiver>(proto));
+ dcheck(proto == Null || Is<JSReceiver>(proto));
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -48,7 +48,7 @@ ProxySetPrototypeOf(implicit context: Context)(
// 10. Let extensibleTarget be ? IsExtensible(target).
// 11. If extensibleTarget is true, return true.
const extensibleTarget: Object = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
return True;
}
diff --git a/chromium/v8/src/builtins/proxy.tq b/chromium/v8/src/builtins/proxy.tq
index e80ed361921..9e56a28903e 100644
--- a/chromium/v8/src/builtins/proxy.tq
+++ b/chromium/v8/src/builtins/proxy.tq
@@ -11,13 +11,13 @@ extern macro ProxiesCodeStubAssembler::AllocateProxy(implicit context: Context)(
extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
implicit context: Context)(
- JSReceiver, JSProxy, Name, Object, constexpr int31);
+ JSReceiver, JSProxy, Name, Object, constexpr int31): void;
extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
+ implicit context: Context)(JSReceiver, JSProxy, Name): void;
extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
+ implicit context: Context)(JSReceiver, JSProxy, Name): void;
const kProxyGet: constexpr int31
generates 'JSProxy::AccessKind::kGet';
diff --git a/chromium/v8/src/builtins/regexp-match-all.tq b/chromium/v8/src/builtins/regexp-match-all.tq
index 932972d8440..1f9aa1819f4 100644
--- a/chromium/v8/src/builtins/regexp-match-all.tq
+++ b/chromium/v8/src/builtins/regexp-match-all.tq
@@ -41,7 +41,7 @@ transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)(
const flags: String = FastFlagsGetter(fastRegExp);
matcher = RegExpCreate(nativeContext, source, flags);
const matcherRegExp = UnsafeCast<JSRegExp>(matcher);
- assert(IsFastRegExpPermissive(matcherRegExp));
+ dcheck(IsFastRegExpPermissive(matcherRegExp));
// 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
// 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
@@ -159,7 +159,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
}
// a. If global is true,
- assert(flags.global);
+ dcheck(flags.global);
if (isFastRegExp) {
// i. Let matchStr be ? ToString(? Get(match, "0")).
const match = UnsafeCast<JSRegExpResult>(match);
@@ -168,7 +168,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
// When iterating_regexp is fast, we assume it stays fast even after
// accessing the first match from the RegExp result.
- assert(IsFastRegExpPermissive(iteratingRegExp));
+ dcheck(IsFastRegExpPermissive(iteratingRegExp));
const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp);
if (matchStr == kEmptyString) {
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
@@ -186,7 +186,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
// iii. Return ! CreateIterResultObject(match, false).
return AllocateJSIteratorResult(match, False);
}
- assert(!isFastRegExp);
+ dcheck(!isFastRegExp);
// i. Let matchStr be ? ToString(? Get(match, "0")).
const match = UnsafeCast<JSAny>(match);
const matchStr = ToString_Inline(GetProperty(match, SmiConstant(0)));
diff --git a/chromium/v8/src/builtins/regexp-match.tq b/chromium/v8/src/builtins/regexp-match.tq
index 5fca09893c7..3da132636a9 100644
--- a/chromium/v8/src/builtins/regexp-match.tq
+++ b/chromium/v8/src/builtins/regexp-match.tq
@@ -22,7 +22,7 @@ extern macro UnsafeLoadFixedArrayElement(
transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
regexp: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
if constexpr (isFastPath) {
- assert(Is<FastJSRegExp>(regexp));
+ dcheck(Is<FastJSRegExp>(regexp));
}
const isGlobal: bool = FlagGetter(regexp, Flag::kGlobal, isFastPath);
@@ -32,7 +32,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
RegExpExec(regexp, string);
}
- assert(isGlobal);
+ dcheck(isGlobal);
const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath);
StoreLastIndex(regexp, 0, isFastPath);
@@ -74,7 +74,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
string, UnsafeCast<Smi>(matchFrom), UnsafeCast<Smi>(matchTo));
}
} else {
- assert(!isFastPath);
+ dcheck(!isFastPath);
const resultTemp = RegExpExec(regexp, string);
if (resultTemp == Null) {
goto IfDidNotMatch;
@@ -96,7 +96,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
}
let lastIndex = LoadLastIndex(regexp, isFastPath);
if constexpr (isFastPath) {
- assert(TaggedIsPositiveSmi(lastIndex));
+ dcheck(TaggedIsPositiveSmi(lastIndex));
} else {
lastIndex = ToLength_Inline(lastIndex);
}
@@ -109,7 +109,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
// incremented to overflow the Smi range since the maximal string
// length is less than the maximal Smi value.
StaticAssertStringLengthFitsSmi();
- assert(TaggedIsPositiveSmi(newLastIndex));
+ dcheck(TaggedIsPositiveSmi(newLastIndex));
}
StoreLastIndex(regexp, newLastIndex, isFastPath);
diff --git a/chromium/v8/src/builtins/regexp-replace.tq b/chromium/v8/src/builtins/regexp-replace.tq
index c59a41b27f6..d26f8d6949c 100644
--- a/chromium/v8/src/builtins/regexp-replace.tq
+++ b/chromium/v8/src/builtins/regexp-replace.tq
@@ -22,7 +22,7 @@ StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
transitioning macro RegExpReplaceCallableNoExplicitCaptures(
implicit context: Context)(
matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable) {
+ replaceFn: Callable): void {
let matchStart: Smi = 0;
for (let i: intptr = 0; i < matchesLength; i++) {
typeswitch (matchesElements.objects[i]) {
@@ -63,7 +63,8 @@ transitioning macro RegExpReplaceCallableNoExplicitCaptures(
transitioning macro
RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
+ matchesElements: FixedArray, matchesLength: intptr,
+ replaceFn: Callable): void {
for (let i: intptr = 0; i < matchesLength; i++) {
const elArray =
Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -175,9 +176,9 @@ transitioning macro RegExpReplaceFastString(implicit context: Context)(
transitioning builtin RegExpReplace(implicit context: Context)(
regexp: FastJSRegExp, string: String, replaceValue: JSAny): String {
- // TODO(pwong): Remove assert when all callers (StringPrototypeReplace) are
+ // TODO(pwong): Remove dcheck when all callers (StringPrototypeReplace) are
// from Torque.
- assert(Is<FastJSRegExp>(regexp));
+ dcheck(Is<FastJSRegExp>(regexp));
// 2. Is {replace_value} callable?
typeswitch (replaceValue) {
diff --git a/chromium/v8/src/builtins/regexp-search.tq b/chromium/v8/src/builtins/regexp-search.tq
index b70d23a0dd5..7deec8b1c64 100644
--- a/chromium/v8/src/builtins/regexp-search.tq
+++ b/chromium/v8/src/builtins/regexp-search.tq
@@ -9,7 +9,7 @@ namespace regexp {
transitioning macro
RegExpPrototypeSearchBodyFast(implicit context: Context)(
regexp: JSRegExp, string: String): JSAny {
- assert(IsFastRegExpPermissive(regexp));
+ dcheck(IsFastRegExpPermissive(regexp));
// Grab the initial value of last index.
const previousLastIndex: Smi = FastLoadLastIndex(regexp);
diff --git a/chromium/v8/src/builtins/regexp.tq b/chromium/v8/src/builtins/regexp.tq
index 29fad267361..5760b066586 100644
--- a/chromium/v8/src/builtins/regexp.tq
+++ b/chromium/v8/src/builtins/regexp.tq
@@ -86,7 +86,7 @@ transitioning macro RegExpPrototypeExecBodyWithoutResult(
regexp: JSRegExp, string: String, regexpLastIndex: Number,
isFastPath: constexpr bool): RegExpMatchInfo labels IfDidNotMatch {
if (isFastPath) {
- assert(HasInitialRegExpMap(regexp));
+ dcheck(HasInitialRegExpMap(regexp));
} else {
IncrementUseCounter(context, SmiConstant(kRegExpExecCalledOnSlowRegExp));
}
@@ -397,7 +397,7 @@ transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
return Is<JSRegExp>(receiver);
}
- assert(value != Undefined);
+ dcheck(value != Undefined);
// The common path. Symbol.match exists, equals the RegExpPrototypeMatch
// function (and is thus trueish), and the receiver is a JSRegExp.
if (ToBoolean(value)) {
@@ -408,7 +408,7 @@ transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
return true;
}
- assert(!ToBoolean(value));
+ dcheck(!ToBoolean(value));
if (Is<JSRegExp>(receiver)) {
IncrementUseCounter(context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp));
}
diff --git a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
index f79e392f480..3676ae34419 100644
--- a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -320,6 +320,15 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int64_t>(CodeKind::BASELINE)));
+}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -330,7 +339,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
Label::Distance::kNear);
__ LoadTaggedPointerField(
@@ -401,17 +411,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
__ LoadTaggedPointerField(
- scratch,
+ t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
__ bind(&loop);
__ Sub64(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
- __ CalcScaledAddress(kScratchReg, scratch, a3, kTaggedSizeLog2);
+ __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
__ LoadAnyTaggedField(
kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
@@ -575,9 +583,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ li(s3, Operand(StackFrame::TypeToMarker(type)));
ExternalReference c_entry_fp = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
- __ li(s4, c_entry_fp);
- __ Ld(s4, MemOperand(s4));
+ __ li(s5, c_entry_fp);
+ __ Ld(s4, MemOperand(s5));
__ Push(s1, s2, s3, s4);
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Sd(zero_reg, MemOperand(s5));
// Set up frame pointer for the frame to be pushed.
__ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
@@ -1010,7 +1023,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ Branch(if_return, eq, bytecode, \
- Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ Operand(static_cast<int64_t>(interpreter::Bytecode::k##NAME)));
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
@@ -1018,7 +1031,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// of the loop.
Label end, not_jump_loop;
__ Branch(&not_jump_loop, ne, bytecode,
- Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)),
+ Operand(static_cast<int64_t>(interpreter::Bytecode::kJumpLoop)),
Label::Distance::kNear);
// We need to restore the original bytecode_offset since we might have
// increased it to skip the wide / extra-wide prefix bytecode.
@@ -1160,9 +1173,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// store the bytecode offset.
if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
- Register type = temps.Acquire();
- __ GetObjectType(feedback_vector, type, type);
- __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
Operand(FEEDBACK_VECTOR_TYPE));
}
   // Our stack is currently aligned. We have to push something along with
@@ -1171,8 +1184,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ Push(feedback_vector);
}
Label call_stack_guard;
@@ -1203,7 +1215,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
- __ Pop(fp, ra);
+ __ Pop(ra, fp);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
@@ -1212,14 +1224,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
- Register new_target = descriptor.GetRegisterParameter(
- BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
-
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
- __ Push(zero_reg, new_target);
- __ CallRuntime(Runtime::kStackGuard);
- __ Pop(new_target, zero_reg);
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1239,7 +1250,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// o ra: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
+// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
@@ -1466,36 +1477,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
- __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimization state in the feedback vector.
- // TODO(v8:11429): Is this worth doing here? Baseline code will check it
- // anyway...
- __ Ld(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to
- // be processed.
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2);
__ JumpCodeObject(a2);
@@ -1888,7 +1891,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2713,6 +2717,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2728,7 +2733,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
for (Register gp_param_reg : wasm::kGpParamRegisters) {
gp_regs |= gp_param_reg.bit();
}
- // Also push x1, because we must push multiples of 16 bytes (see
+ // Also push a1, because we must push multiples of 16 bytes (see
// {TurboAssembler::PushCPURegList}.
CHECK_EQ(0, NumRegs(gp_regs) % 2);
@@ -2786,6 +2791,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
}
__ Ret();
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -2909,12 +2915,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Compute the handler entry address and jump to it.
UseScratchRegisterScope temp(masm);
Register scratch = temp.Acquire();
@@ -3479,7 +3479,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ JumpIfSmi(a1, &context_check);
__ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ __ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
// a4: already has fp-to-sp delta.
@@ -3640,7 +3640,6 @@ namespace {
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3649,7 +3648,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
- Register code_obj = a4;
+ Register code_obj = s1;
__ LoadTaggedPointerField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3664,10 +3663,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
- __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
- __ Pop(zero_reg, kInterpreterAccumulatorRegister);
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
@@ -3681,13 +3679,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
__ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
- Operand(BASELINE_DATA_TYPE));
+ Operand(CODET_TYPE));
+ }
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ AssertCodeIsBaseline(masm, code_obj, scratch);
}
-
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
@@ -3701,7 +3699,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
- __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+ __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3711,7 +3709,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
@@ -3744,6 +3741,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3755,13 +3753,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add64(code_obj, code_obj, kReturnRegister0);
- __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+ __ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
- __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3786,8 +3786,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
@@ -3849,7 +3851,7 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
Label deopt, bailout;
__ Branch(&deopt, ne, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)),
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)),
Label::Distance::kNear);
__ MaybeRestoreRegisters(registers);
@@ -3858,11 +3860,11 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
__ bind(&deopt);
__ Branch(&bailout, eq, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
diff --git a/chromium/v8/src/builtins/s390/builtins-s390.cc b/chromium/v8/src/builtins/s390/builtins-s390.cc
index 5129cc6ee31..6e94704780c 100644
--- a/chromium/v8/src/builtins/s390/builtins-s390.cc
+++ b/chromium/v8/src/builtins/s390/builtins-s390.cc
@@ -1681,7 +1681,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+ r3,
+ FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2090,8 +2091,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(r3);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadTaggedPointerField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -2106,6 +2105,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ AndP(r0, r5,
Operand(SharedFunctionInfo::IsStrictBit::kMask |
SharedFunctionInfo::IsNativeBit::kMask));
@@ -2285,34 +2285,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r3, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r6, r3);
- __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r2;
+ Register target = r3;
+ Register map = r6;
+ Register instance_type = r7;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, le);
- __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ LoadU8(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::Bits1::IsCallableBit::kShift);
- __ beq(&non_callable);
+ {
+ Register flags = r6;
+ __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestBit(flags, Map::Bits1::IsCallableBit::kShift);
+ __ beq(&non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ CmpS64(r7, Operand(JS_PROXY_TYPE));
+ __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ beq(&class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
   // Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r3, r2, r7);
+ __ StoreReceiver(target, argc, r7);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r3, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2321,8 +2335,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
@@ -2392,31 +2416,41 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r2;
+ Register target = r3;
+ Register map = r6;
+ Register instance_type = r7;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r3, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadU8(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r4, Map::Bits1::IsConstructorBit::kShift);
- __ beq(&non_constructor);
+ __ LoadTaggedPointerField(map,
+ FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = r4;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift);
+ __ beq(&non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ CmpS64(r7, Operand(JS_PROXY_TYPE));
+ __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
__ bne(&non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2425,9 +2459,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r3, r2, r7);
+ __ StoreReceiver(target, argc, r7);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r3, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2679,12 +2714,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
diff --git a/chromium/v8/src/builtins/setup-builtins-internal.cc b/chromium/v8/src/builtins/setup-builtins-internal.cc
index 2724f9a2001..d61a2705fb3 100644
--- a/chromium/v8/src/builtins/setup-builtins-internal.cc
+++ b/chromium/v8/src/builtins/setup-builtins-internal.cc
@@ -66,7 +66,7 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
ExternalAssemblerBuffer(buffer, kBufferSize));
DCHECK(!masm.has_frame());
{
- FrameScope scope(&masm, StackFrame::NONE);
+ FrameScope frame_scope(&masm, StackFrame::NO_FRAME_TYPE);
// The contents of placeholder don't matter, as long as they don't create
// embedded constants or external references.
masm.Move(kJavaScriptCallCodeStartRegister, Smi::zero());
@@ -156,11 +156,11 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
- const int argc_with_recv =
- (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(
- isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel)
+ ? 0
+ : argc + (kJSArgcIncludesReceiver ? 0 : 1);
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
@@ -183,9 +183,8 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
diff --git a/chromium/v8/src/builtins/string-pad.tq b/chromium/v8/src/builtins/string-pad.tq
index b95e68628a4..6812a32b7d7 100644
--- a/chromium/v8/src/builtins/string-pad.tq
+++ b/chromium/v8/src/builtins/string-pad.tq
@@ -22,7 +22,7 @@ transitioning macro StringPad(implicit context: Context)(
return receiverString;
}
const maxLength: Number = ToLength_Inline(arguments[0]);
- assert(IsNumberNormalized(maxLength));
+ dcheck(IsNumberNormalized(maxLength));
typeswitch (maxLength) {
case (smiMaxLength: Smi): {
@@ -49,7 +49,7 @@ transitioning macro StringPad(implicit context: Context)(
}
// Pad.
- assert(fillLength > 0);
+ dcheck(fillLength > 0);
// Throw if max_length is greater than String::kMaxLength.
if (!TaggedIsSmi(maxLength)) {
ThrowInvalidStringLength(context);
@@ -59,7 +59,7 @@ transitioning macro StringPad(implicit context: Context)(
if (smiMaxLength > SmiConstant(kStringMaxLength)) {
ThrowInvalidStringLength(context);
}
- assert(smiMaxLength > stringLength);
+ dcheck(smiMaxLength > stringLength);
const padLength: Smi = smiMaxLength - stringLength;
let padding: String;
@@ -85,11 +85,11 @@ transitioning macro StringPad(implicit context: Context)(
}
// Return result.
- assert(padLength == padding.length_smi);
+ dcheck(padLength == padding.length_smi);
if (variant == kStringPadStart) {
return padding + receiverString;
}
- assert(variant == kStringPadEnd);
+ dcheck(variant == kStringPadEnd);
return receiverString + padding;
}
diff --git a/chromium/v8/src/builtins/string-repeat.tq b/chromium/v8/src/builtins/string-repeat.tq
index e1e33eb53ab..b5ced876b71 100644
--- a/chromium/v8/src/builtins/string-repeat.tq
+++ b/chromium/v8/src/builtins/string-repeat.tq
@@ -7,8 +7,8 @@ const kBuiltinName: constexpr string = 'String.prototype.repeat';
builtin StringRepeat(implicit context: Context)(
string: String, count: Smi): String {
- assert(count >= 0);
- assert(string != kEmptyString);
+ dcheck(count >= 0);
+ dcheck(string != kEmptyString);
let result: String = kEmptyString;
let powerOfTwoRepeats: String = string;
@@ -50,7 +50,7 @@ transitioning javascript builtin StringPrototypeRepeat(
return StringRepeat(s, n);
}
case (heapNum: HeapNumber): deferred {
- assert(IsNumberNormalized(heapNum));
+ dcheck(IsNumberNormalized(heapNum));
const n = LoadHeapNumberValue(heapNum);
// 4. If n < 0, throw a RangeError exception.
diff --git a/chromium/v8/src/builtins/string-substr.tq b/chromium/v8/src/builtins/string-substr.tq
index 068c4437ca6..9c0f63d085e 100644
--- a/chromium/v8/src/builtins/string-substr.tq
+++ b/chromium/v8/src/builtins/string-substr.tq
@@ -27,7 +27,7 @@ transitioning javascript builtin StringPrototypeSubstr(
// 7. Let resultLength be min(max(end, 0), size - intStart).
const length = arguments[1];
const lengthLimit = size - initStart;
- assert(lengthLimit <= size);
+ dcheck(lengthLimit <= size);
const resultLength: uintptr = length != Undefined ?
ClampToIndexRange(length, lengthLimit) :
lengthLimit;
diff --git a/chromium/v8/src/builtins/torque-csa-header-includes.h b/chromium/v8/src/builtins/torque-csa-header-includes.h
index 879fda5bbea..750843e6db0 100644
--- a/chromium/v8/src/builtins/torque-csa-header-includes.h
+++ b/chromium/v8/src/builtins/torque-csa-header-includes.h
@@ -14,6 +14,5 @@
#include "src/compiler/code-assembler.h"
#include "src/utils/utils.h"
#include "torque-generated/csa-types.h"
-#include "torque-generated/field-offsets.h"
#endif // V8_BUILTINS_TORQUE_CSA_HEADER_INCLUDES_H_
diff --git a/chromium/v8/src/builtins/torque-internal.tq b/chromium/v8/src/builtins/torque-internal.tq
index d9f05f55331..9fe503f5f53 100644
--- a/chromium/v8/src/builtins/torque-internal.tq
+++ b/chromium/v8/src/builtins/torque-internal.tq
@@ -231,19 +231,20 @@ const kAllocateBaseFlags: constexpr AllocationFlag =
AllocationFlag::kAllowLargeObjectAllocation;
macro AllocateFromNew(
sizeInBytes: intptr, map: Map, pretenured: bool): UninitializedHeapObject {
- assert(ValidAllocationSize(sizeInBytes, map));
+ dcheck(ValidAllocationSize(sizeInBytes, map));
if (pretenured) {
return Allocate(
sizeInBytes,
%RawConstexprCast<constexpr AllocationFlag>(
- kAllocateBaseFlags | AllocationFlag::kPretenured));
+ %RawConstexprCast<constexpr int32>(kAllocateBaseFlags) |
+ %RawConstexprCast<constexpr int32>(AllocationFlag::kPretenured)));
} else {
return Allocate(sizeInBytes, kAllocateBaseFlags);
}
}
macro InitializeFieldsFromIterator<T: type, Iterator: type>(
- target: MutableSlice<T>, originIterator: Iterator) {
+ target: MutableSlice<T>, originIterator: Iterator): void {
let targetIterator = target.Iterator();
let originIterator = originIterator;
while (true) {
@@ -253,12 +254,14 @@ macro InitializeFieldsFromIterator<T: type, Iterator: type>(
}
// Dummy implementations: do not initialize for UninitializedIterator.
InitializeFieldsFromIterator<char8, UninitializedIterator>(
- _target: MutableSlice<char8>, _originIterator: UninitializedIterator) {}
+ _target: MutableSlice<char8>,
+ _originIterator: UninitializedIterator): void {}
InitializeFieldsFromIterator<char16, UninitializedIterator>(
- _target: MutableSlice<char16>, _originIterator: UninitializedIterator) {}
+ _target: MutableSlice<char16>,
+ _originIterator: UninitializedIterator): void {}
extern macro IsDoubleHole(HeapObject, intptr): bool;
-extern macro StoreDoubleHole(HeapObject, intptr);
+extern macro StoreDoubleHole(HeapObject, intptr): void;
macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
return float64_or_hole{
@@ -267,7 +270,7 @@ macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
value: *unsafe::NewReference<float64>(r.object, r.offset)
};
}
-macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
+macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole): void {
if (value.is_hole) {
StoreDoubleHole(
%RawDownCast<HeapObject>(r.object), r.offset - kHeapObjectTag);
@@ -297,12 +300,12 @@ macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
return %RawDownCast<T>(o);
}
-extern macro StaticAssert(bool, constexpr string);
+extern macro StaticAssert(bool, constexpr string): void;
// This is for the implementation of the dot operator. In any context where the
// dot operator is available, the correct way to get the length of an indexed
// field x from object o is `(&o.x).length`.
-intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
+intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string): intptr;
// If field x is defined as optional, then &o.x returns a reference to the field
// or crashes the program (unreachable) if the field is not present. Usually
@@ -311,7 +314,8 @@ intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
// optional field, which is either length zero or one depending on whether the
// field is present. This intrinsic provides Slices for both indexed fields
// (equivalent to &o.x) and optional fields.
-intrinsic %FieldSlice<T: type>(o: T, f: constexpr string);
+intrinsic %FieldSlice<T: type, TSlice: type>(
+ o: T, f: constexpr string): TSlice;
} // namespace torque_internal
@@ -321,7 +325,7 @@ struct UninitializedIterator {}
// %RawDownCast should *never* be used anywhere in Torque code except for
 // in Torque-based UnsafeCast operators preceded by an appropriate
-// type assert()
+// type dcheck()
intrinsic %RawDownCast<To: type, From: type>(x: From): To;
intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
diff --git a/chromium/v8/src/builtins/typed-array-at.tq b/chromium/v8/src/builtins/typed-array-at.tq
index 6ec4730d943..cd7dcfdedd6 100644
--- a/chromium/v8/src/builtins/typed-array-at.tq
+++ b/chromium/v8/src/builtins/typed-array-at.tq
@@ -8,9 +8,10 @@ transitioning javascript builtin TypedArrayPrototypeAt(
js-implicit context: NativeContext, receiver: JSAny)(index: JSAny): JSAny {
// 1. Let O be the this value.
// 2. Perform ? ValidateTypedArray(O).
- const o = ValidateTypedArray(context, receiver, '%TypedArray%.prototype.at');
- // 3. Let len be O.[[ArrayLength]].
- const len = Convert<Number>(o.length);
+ // 3. Let len be IntegerIndexedObjectLength(O).
+ const len = Convert<Number>(ValidateTypedArrayAndGetLength(
+ context, receiver, '%TypedArray%.prototype.at'));
+
// 4. Let relativeIndex be ? ToInteger(index).
const relativeIndex = ToInteger_Inline(index);
// 5. If relativeIndex ≥ 0, then
@@ -23,6 +24,6 @@ transitioning javascript builtin TypedArrayPrototypeAt(
return Undefined;
}
// 8. Return ? Get(O, ! ToString(k)).
- return GetProperty(o, k);
+ return GetProperty(receiver, k);
}
}
diff --git a/chromium/v8/src/builtins/typed-array-createtypedarray.tq b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
index 2f94f6205f7..45a396afe63 100644
--- a/chromium/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
@@ -28,8 +28,8 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
isLengthTracking: bool): JSTypedArray {
let elements: ByteArray;
if constexpr (isOnHeap) {
- assert(!IsResizableArrayBuffer(buffer));
- assert(!isLengthTracking);
+ dcheck(!IsResizableArrayBuffer(buffer));
+ dcheck(!isLengthTracking);
elements = AllocateByteArray(byteLength);
} else {
elements = kEmptyByteArray;
@@ -44,7 +44,7 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
// allocator is NOT used. When the mock array buffer is used, impossibly
// large allocations are allowed that would erroneously cause an overflow
// and this assertion to fail.
- assert(
+ dcheck(
IsMockArrayBufferAllocatorFlag() ||
(backingStore + byteOffset) >= backingStore);
}
@@ -62,13 +62,12 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.bit_field.is_length_tracking = isLengthTracking;
typedArray.bit_field.is_backed_by_rab =
IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
- typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
} else {
typed_array::SetJSTypedArrayOffHeapDataPtr(
typedArray, buffer.backing_store_ptr, byteOffset);
- assert(
+ dcheck(
typedArray.data_ptr ==
(buffer.backing_store_ptr + Convert<intptr>(byteOffset)));
}
@@ -165,7 +164,7 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
} else if (length > 0) {
const byteLength = typedArray.byte_length;
- assert(byteLength <= kArrayBufferMaxByteLength);
+ dcheck(byteLength <= kArrayBufferMaxByteLength);
if (IsSharedArrayBuffer(src.buffer)) {
typed_array::CallCRelaxedMemcpy(
typedArray.data_ptr, src.data_ptr, byteLength);
@@ -293,7 +292,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
// in the step 12 branch.
newByteLength = bufferByteLength - offset;
newLength = elementsInfo.CalculateLength(newByteLength)
- otherwise IfInvalidOffset;
+ otherwise IfInvalidLength;
// 12. Else,
} else {
@@ -327,7 +326,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
transitioning macro TypedArrayCreateByLength(implicit context: Context)(
constructor: Constructor, length: Number, methodName: constexpr string):
JSTypedArray {
- assert(IsSafeInteger(length));
+ dcheck(IsSafeInteger(length));
// 1. Let newTypedArray be ? Construct(constructor, argumentList).
const newTypedArrayObj = Construct(constructor, length);
@@ -385,7 +384,7 @@ transitioning macro ConstructByJSReceiver(implicit context: Context)(
transitioning builtin CreateTypedArray(
context: Context, target: JSFunction, newTarget: JSReceiver, arg1: JSAny,
arg2: JSAny, arg3: JSAny): JSTypedArray {
- assert(IsConstructor(target));
+ dcheck(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
try {
@@ -442,7 +441,7 @@ transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
// It is assumed that the CreateTypedArray builtin does not produce a
// typed array that fails ValidateTypedArray
- assert(!IsDetachedBuffer(typedArray.buffer));
+ dcheck(!IsDetachedBuffer(typedArray.buffer));
return typedArray;
} label IfSlow deferred {
@@ -456,7 +455,7 @@ transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
if constexpr (numArgs == 1) {
newObj = Construct(constructor, arg0);
} else {
- assert(numArgs == 3);
+ dcheck(numArgs == 3);
newObj = Construct(constructor, arg0, arg1, arg2);
}
diff --git a/chromium/v8/src/builtins/typed-array-every.tq b/chromium/v8/src/builtins/typed-array-every.tq
index fdd4961dee6..f2701a040b9 100644
--- a/chromium/v8/src/builtins/typed-array-every.tq
+++ b/chromium/v8/src/builtins/typed-array-every.tq
@@ -7,24 +7,43 @@
namespace typed_array {
const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning macro EveryAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
+ array: typed_array::AttachedJSTypedArray, length: uintptr,
+ callbackfn: Callable, thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
+
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
+ value = witness.Load(k);
+ } label IsDetachedOrOutOfBounds deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+ // 6d. If testResult is false, return false.
if (!ToBoolean(result)) {
return False;
}
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return true.
return True;
}
@@ -35,19 +54,24 @@ TypedArrayPrototypeEvery(
// arguments[0] = callback
// arguments[1] = thisArg
try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ // 3. Let len be IntegerIndexedObjectLength(O).
const array: JSTypedArray = Cast<JSTypedArray>(receiver)
otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+ const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise IsDetachedOrOutOfBounds;
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return EveryAllElements(uarray, callbackfn, thisArg);
- } label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return EveryAllElements(
+ %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
} label NotTypedArray deferred {
ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEvery);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEvery);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
}
diff --git a/chromium/v8/src/builtins/typed-array-filter.tq b/chromium/v8/src/builtins/typed-array-filter.tq
index 15d40f92ebe..18fbce9f09f 100644
--- a/chromium/v8/src/builtins/typed-array-filter.tq
+++ b/chromium/v8/src/builtins/typed-array-filter.tq
@@ -38,11 +38,15 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// 8. Let captured be 0.
// 9. Repeat, while k < len
for (let k: uintptr = 0; k < len; k++) {
- witness.Recheck() otherwise IsDetached;
-
+ let value: JSAny;
// a. Let Pk be ! ToString(k).
// b. Let kValue be ? Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
// »)).
@@ -57,7 +61,7 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// ii. Increase captured by 1.
if (ToBoolean(selected)) kept.Push(value);
- // e.Increase k by 1.
+ // e. Increase k by 1. (done by the loop)
}
// 10. Let A be ? TypedArraySpeciesCreate(O, captured).
diff --git a/chromium/v8/src/builtins/typed-array-find.tq b/chromium/v8/src/builtins/typed-array-find.tq
index 24a13dbc23e..b37b4ef8a91 100644
--- a/chromium/v8/src/builtins/typed-array-find.tq
+++ b/chromium/v8/src/builtins/typed-array-find.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
transitioning macro FindAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
+ context, predicate, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return kValue.
if (ToBoolean(result)) {
return value;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
@@ -39,9 +60,9 @@ TypedArrayPrototypeFind(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindAllElements(uarray, callbackfn, thisArg);
+ return FindAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/chromium/v8/src/builtins/typed-array-findindex.tq b/chromium/v8/src/builtins/typed-array-findindex.tq
index 7bb01151f35..aede90dc7f7 100644
--- a/chromium/v8/src/builtins/typed-array-findindex.tq
+++ b/chromium/v8/src/builtins/typed-array-findindex.tq
@@ -9,19 +9,33 @@ const kBuiltinNameFindIndex: constexpr string =
'%TypedArray%.prototype.findIndex';
transitioning macro FindIndexAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const indexNumber: Number = Convert<Number>(k);
const result = Call(
- context, callbackfn, thisArg, value, indexNumber, witness.GetStable());
+ context, predicate, thisArg, value, indexNumber, witness.GetStable());
if (ToBoolean(result)) {
return indexNumber;
}
@@ -40,9 +54,9 @@ TypedArrayPrototypeFindIndex(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindIndexAllElements(uarray, callbackfn, thisArg);
+ return FindIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/chromium/v8/src/builtins/typed-array-findlast.tq b/chromium/v8/src/builtins/typed-array-findlast.tq
index 634e17b9368..15f67760c0f 100644
--- a/chromium/v8/src/builtins/typed-array-findlast.tq
+++ b/chromium/v8/src/builtins/typed-array-findlast.tq
@@ -8,56 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLast: constexpr string =
'%TypedArray%.prototype.findLast';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
-// when array buffer was detached.
-transitioning builtin FindLastAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): JSAny {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result =
- Call(context, predicate, thisArg, Undefined, Convert<Number>(k), array);
- // 6d. If testResult is true, return kValue.
- if (ToBoolean(result)) {
- return Undefined;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return undefined.
- return Undefined;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
transitioning macro FindLastAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): JSAny labels
-Bailout(Number) {
+ thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -94,13 +66,7 @@ TypedArrayPrototypeFindLast(
// 4. If IsCallable(predicate) is false, throw a TypeError exception.
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/chromium/v8/src/builtins/typed-array-findlastindex.tq b/chromium/v8/src/builtins/typed-array-findlastindex.tq
index 4b20114c91b..56d139d8b1b 100644
--- a/chromium/v8/src/builtins/typed-array-findlastindex.tq
+++ b/chromium/v8/src/builtins/typed-array-findlastindex.tq
@@ -8,57 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLastIndex: constexpr string =
'%TypedArray%.prototype.findIndexLast';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
-// when array buffer was detached.
-transitioning builtin FindLastIndexAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): Number {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const indexNumber: Number = Convert<Number>(k);
- const result =
- Call(context, predicate, thisArg, Undefined, indexNumber, array);
- // 6d. If testResult is true, return 𝔽(k).
- if (ToBoolean(result)) {
- return indexNumber;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return -1𝔽.
- return -1;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
transitioning macro FindLastIndexAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): Number labels
-Bailout(Number) {
+ thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -96,13 +67,7 @@ TypedArrayPrototypeFindLastIndex(
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastIndexAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastIndexAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/chromium/v8/src/builtins/typed-array-foreach.tq b/chromium/v8/src/builtins/typed-array-foreach.tq
index d696d9c8dd8..fa227bc75be 100644
--- a/chromium/v8/src/builtins/typed-array-foreach.tq
+++ b/chromium/v8/src/builtins/typed-array-foreach.tq
@@ -12,16 +12,33 @@ transitioning macro ForEachAllElements(implicit context: Context)(
thisArg: JSAny): Undefined {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Perform ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
diff --git a/chromium/v8/src/builtins/typed-array-reduce.tq b/chromium/v8/src/builtins/typed-array-reduce.tq
index a54ed1040e9..0261599106d 100644
--- a/chromium/v8/src/builtins/typed-array-reduce.tq
+++ b/chromium/v8/src/builtins/typed-array-reduce.tq
@@ -12,11 +12,17 @@ transitioning macro ReduceAllElements(implicit context: Context)(
initialValue: JSAny|TheHole): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
let accumulator = initialValue;
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/chromium/v8/src/builtins/typed-array-reduceright.tq b/chromium/v8/src/builtins/typed-array-reduceright.tq
index 9ba2f70de4e..5449c4f1fcf 100644
--- a/chromium/v8/src/builtins/typed-array-reduceright.tq
+++ b/chromium/v8/src/builtins/typed-array-reduceright.tq
@@ -8,6 +8,7 @@ namespace typed_array {
const kBuiltinNameReduceRight: constexpr string =
'%TypedArray%.prototype.reduceRight';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
transitioning macro ReduceRightAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
initialValue: JSAny|TheHole): JSAny {
@@ -15,9 +16,14 @@ transitioning macro ReduceRightAllElements(implicit context: Context)(
const length: uintptr = witness.Get().length;
let accumulator = initialValue;
for (let k: uintptr = length; k-- > 0;) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/chromium/v8/src/builtins/typed-array-set.tq b/chromium/v8/src/builtins/typed-array-set.tq
index f4d2a40f411..e40ff9f7377 100644
--- a/chromium/v8/src/builtins/typed-array-set.tq
+++ b/chromium/v8/src/builtins/typed-array-set.tq
@@ -115,18 +115,6 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
IfDetached {
// Steps 9-13 are not observable, do them later.
- // TODO(v8:8906): This ported behaviour is an observable spec violation and
- // the comment below seems to be outdated. Consider removing this code.
- try {
- const _arrayArgNum = Cast<Number>(arrayArg) otherwise NotNumber;
- // For number as a first argument, throw TypeError instead of silently
- // ignoring the call, so that users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- ThrowTypeError(MessageTemplate::kInvalidArgument);
- } label NotNumber {
- // Proceed to step 14.
- }
-
// 14. Let src be ? ToObject(array).
const src: JSReceiver = ToObject_Inline(context, arrayArg);
@@ -267,8 +255,8 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
otherwise unreachable;
const dstPtr: RawPtr = target.data_ptr + Convert<intptr>(startOffset);
- assert(countBytes <= target.byte_length - startOffset);
- assert(countBytes <= typedArray.byte_length);
+ dcheck(countBytes <= target.byte_length - startOffset);
+ dcheck(countBytes <= typedArray.byte_length);
// 29. If srcType is the same as targetType, then
// a. NOTE: If srcType and targetType are the same, the transfer must
diff --git a/chromium/v8/src/builtins/typed-array-slice.tq b/chromium/v8/src/builtins/typed-array-slice.tq
index 2a18433f93d..356bf36d4ca 100644
--- a/chromium/v8/src/builtins/typed-array-slice.tq
+++ b/chromium/v8/src/builtins/typed-array-slice.tq
@@ -12,7 +12,7 @@ extern macro TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
macro FastCopy(
src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: uintptr,
- count: uintptr) labels IfSlow {
+ count: uintptr): void labels IfSlow {
if (IsForceSlowPath()) goto IfSlow;
const srcKind: ElementsKind = src.elements_kind;
@@ -22,7 +22,10 @@ macro FastCopy(
// with the src because of custom species constructor. If the types
// of src and result array are the same and they are not sharing the
// same buffer, use memmove.
- if (srcKind != destInfo.kind) goto IfSlow;
+ if (srcKind != destInfo.kind) {
+ // TODO(v8:11111): Enable the fast branch for RAB / GSAB.
+ goto IfSlow;
+ }
if (dest.buffer == src.buffer) {
goto IfSlow;
}
@@ -33,8 +36,8 @@ macro FastCopy(
otherwise unreachable;
const srcPtr: RawPtr = src.data_ptr + Convert<intptr>(startOffset);
- assert(countBytes <= dest.byte_length);
- assert(countBytes <= src.byte_length - startOffset);
+ dcheck(countBytes <= dest.byte_length);
+ dcheck(countBytes <= src.byte_length - startOffset);
if (IsSharedArrayBuffer(src.buffer)) {
// SABs need a relaxed memmove to preserve atomicity.
@@ -45,7 +48,7 @@ macro FastCopy(
}
macro SlowCopy(implicit context: Context)(
- src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr) {
+ src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr): void {
if (typed_array::IsBigInt64ElementsKind(src.elements_kind) !=
typed_array::IsBigInt64ElementsKind(dest.elements_kind))
deferred {
@@ -63,11 +66,10 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
// 1. Let O be the this value.
// 2. Perform ? ValidateTypedArray(O).
- const src: JSTypedArray =
- ValidateTypedArray(context, receiver, kBuiltinNameSlice);
-
// 3. Let len be O.[[ArrayLength]].
- const len: uintptr = src.length;
+ const len =
+ ValidateTypedArrayAndGetLength(context, receiver, kBuiltinNameSlice);
+ const src: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
// 4. Let relativeStart be ? ToInteger(start).
// 5. If relativeStart < 0, let k be max((len + relativeStart), 0);
@@ -81,11 +83,11 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
// 7. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
// else let final be min(relativeEnd, len).
const end = arguments[1];
- const final: uintptr =
+ let final: uintptr =
end != Undefined ? ConvertToRelativeIndex(end, len) : len;
// 8. Let count be max(final - k, 0).
- const count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
+ let count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
// 9. Let A be ? TypedArraySpeciesCreate(O, « count »).
const dest: JSTypedArray =
@@ -93,9 +95,19 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
if (count > 0) {
try {
- const srcAttached = typed_array::EnsureAttached(src)
- otherwise IfDetached;
- FastCopy(srcAttached, dest, k, count) otherwise IfSlow;
+ const newLength =
+ LoadJSTypedArrayLengthAndCheckDetached(src) otherwise IfDetached;
+ // If the backing buffer is a RAB, it's possible that the length has
+ // decreased since the last time we loaded it.
+ if (k >= newLength) {
+ return dest;
+ }
+ if (final > newLength) {
+ final = newLength;
+ count = Unsigned(IntPtrMax(Signed(final - k), 0));
+ }
+ FastCopy(%RawDownCast<AttachedJSTypedArray>(src), dest, k, count)
+ otherwise IfSlow;
} label IfDetached deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSlice);
} label IfSlow deferred {
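
slice now revalidates the length after TypedArraySpeciesCreate has run user code, since a resizable ArrayBuffer (RAB) may have shrunk in the meantime, and clamps `final` and `count` before taking the fast copy path. A small sketch of just that clamping arithmetic, with hypothetical names (not the Torque macro itself):

```cpp
#include <algorithm>
#include <cstddef>

// Returns how many elements may still be copied starting at `k` after the
// backing buffer's length was re-read as `new_length`.
size_t ClampSliceCount(size_t k, size_t final, size_t new_length) {
  if (k >= new_length) return 0;        // Start is past the shrunk buffer.
  final = std::min(final, new_length);  // Clamp the end of the slice.
  return final > k ? final - k : 0;     // count = max(final - k, 0)
}
```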
diff --git a/chromium/v8/src/builtins/typed-array-some.tq b/chromium/v8/src/builtins/typed-array-some.tq
index ecdfae1e8a5..d9f37937b43 100644
--- a/chromium/v8/src/builtins/typed-array-some.tq
+++ b/chromium/v8/src/builtins/typed-array-some.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.some
transitioning macro SomeAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
+ array: typed_array::AttachedJSTypedArray, length: uintptr,
+ callbackfn: Callable, thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
+
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
+ value = witness.Load(k);
+ } label IsDetachedOrOutOfBounds deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return true.
if (ToBoolean(result)) {
return True;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return false.
return False;
}
@@ -33,21 +54,26 @@ transitioning javascript builtin
TypedArrayPrototypeSome(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
// arguments[0] = callback
- // arguments[1] = thisArg.
+ // arguments[1] = thisArg
try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ // 3. Let len be IntegerIndexedObjectLength(O).
const array: JSTypedArray = Cast<JSTypedArray>(receiver)
otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+ const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise IsDetachedOrOutOfBounds;
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return SomeAllElements(uarray, callbackfn, thisArg);
- } label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return SomeAllElements(
+ %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
} label NotTypedArray deferred {
ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSome);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSome);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
}
diff --git a/chromium/v8/src/builtins/typed-array-sort.tq b/chromium/v8/src/builtins/typed-array-sort.tq
index 614852f444a..1487d1396f1 100644
--- a/chromium/v8/src/builtins/typed-array-sort.tq
+++ b/chromium/v8/src/builtins/typed-array-sort.tq
@@ -33,7 +33,7 @@ transitioning macro
TypedArrayMerge(
implicit context: Context, array: JSTypedArray, comparefn: Callable)(
source: FixedArray, from: uintptr, middle: uintptr, to: uintptr,
- target: FixedArray) {
+ target: FixedArray): void {
let left: uintptr = from;
let right: uintptr = middle;
@@ -56,7 +56,7 @@ TypedArrayMerge(
} else {
// No elements on the left, but the right does, so we take
// from the right.
- assert(left == middle);
+ dcheck(left == middle);
target.objects[targetIndex] = source.objects[right++];
}
}
@@ -66,7 +66,7 @@ transitioning builtin
TypedArrayMergeSort(implicit context: Context)(
source: FixedArray, from: uintptr, to: uintptr, target: FixedArray,
array: JSTypedArray, comparefn: Callable): JSAny {
- assert(to - from > 1);
+ dcheck(to - from > 1);
const middle: uintptr = from + ((to - from) >>> 1);
// On the next recursion step source becomes target and vice versa.
diff --git a/chromium/v8/src/builtins/typed-array.tq b/chromium/v8/src/builtins/typed-array.tq
index 87bcb2fb592..c64573cb3be 100644
--- a/chromium/v8/src/builtins/typed-array.tq
+++ b/chromium/v8/src/builtins/typed-array.tq
@@ -19,6 +19,7 @@ type Float64Elements extends ElementsKind;
type Uint8ClampedElements extends ElementsKind;
type BigUint64Elements extends ElementsKind;
type BigInt64Elements extends ElementsKind;
+type RabGsabUint8Elements extends ElementsKind;
@export
struct TypedArrayElementsInfo {
@@ -56,6 +57,8 @@ extern runtime TypedArrayCopyElements(
Context, JSTypedArray, Object, Number): void;
extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
Context, JSAny, constexpr string): JSTypedArray;
+extern macro TypedArrayBuiltinsAssembler::ValidateTypedArrayAndGetLength(
+ Context, JSAny, constexpr string): uintptr;
extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
RawPtr, RawPtr, uintptr): void;
@@ -80,9 +83,12 @@ extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(ElementsKind):
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, uintptr, constexpr ElementsKind): Numeric;
extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
- Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind);
+ Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind): void;
extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
- Context, JSTypedArray, uintptr, JSAny, constexpr ElementsKind)
+ Context, JSTypedArray, uintptr, JSAny,
+ constexpr ElementsKind): void labels IfDetached;
+
+extern macro LoadJSTypedArrayLengthAndCheckDetached(JSTypedArray): uintptr
labels IfDetached;
type LoadNumericFn = builtin(JSTypedArray, uintptr) => Numeric;
@@ -100,21 +106,22 @@ struct TypedArrayAccessor {
}
macro StoreNumeric(
- context: Context, array: JSTypedArray, index: uintptr, value: Numeric) {
+ context: Context, array: JSTypedArray, index: uintptr,
+ value: Numeric): void {
const storefn: StoreNumericFn = this.storeNumericFn;
const result = storefn(context, array, index, value);
- assert(result == kStoreSucceded);
+ dcheck(result == kStoreSucceded);
}
macro StoreJSAny(
- context: Context, array: JSTypedArray, index: uintptr, value: JSAny)
- labels IfDetached {
+ context: Context, array: JSTypedArray, index: uintptr,
+ value: JSAny): void labels IfDetached {
const storefn: StoreJSAnyFn = this.storeJSAnyFn;
const result = storefn(context, array, index, value);
if (result == kStoreFailureArrayDetached) {
goto IfDetached;
}
- assert(result == kStoreSucceded);
+ dcheck(result == kStoreSucceded);
}
loadNumericFn: LoadNumericFn;
@@ -130,7 +137,15 @@ macro GetTypedArrayAccessor<T : type extends ElementsKind>():
return TypedArrayAccessor{loadNumericFn, storeNumericFn, storeJSAnyFn};
}
-macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
+macro GetTypedArrayAccessor(elementsKindParam: ElementsKind):
+ TypedArrayAccessor {
+ let elementsKind = elementsKindParam;
+ if (IsElementsKindGreaterThanOrEqual(
+ elementsKind, kFirstRabGsabFixedTypedArrayElementsKind)) {
+ elementsKind = %RawDownCast<ElementsKind>(
+ elementsKind - kFirstRabGsabFixedTypedArrayElementsKind +
+ kFirstFixedTypedArrayElementsKind);
+ }
if (IsElementsKindGreaterThan(elementsKind, ElementsKind::UINT32_ELEMENTS)) {
if (elementsKind == ElementsKind::INT32_ELEMENTS) {
return GetTypedArrayAccessor<Int32Elements>();
@@ -161,22 +176,23 @@ macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
unreachable;
}
-extern macro
-TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- JSTypedArray): void;
-
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
JSTypedArray, RawPtr, uintptr): void;
+extern macro IsJSTypedArrayDetachedOrOutOfBounds(JSTypedArray):
+ never labels Detached, NotDetached;
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
labels Detached {
- if (IsDetachedBuffer(array.buffer)) goto Detached;
- return %RawDownCast<AttachedJSTypedArray>(array);
+ try {
+ IsJSTypedArrayDetachedOrOutOfBounds(array) otherwise Detached, NotDetached;
+ } label NotDetached {
+ return %RawDownCast<AttachedJSTypedArray>(array);
+ }
}
struct AttachedJSTypedArrayWitness {
@@ -188,11 +204,21 @@ struct AttachedJSTypedArrayWitness {
return this.stable;
}
- macro Recheck() labels Detached {
+ // TODO(v8:11111): Migrate users to use RecheckIndex.
+ macro Recheck(): void labels Detached {
if (IsDetachedBuffer(this.stable.buffer)) goto Detached;
this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
}
+ macro RecheckIndex(index: uintptr): void labels DetachedOrOutOfBounds {
+ const length = LoadJSTypedArrayLengthAndCheckDetached(this.stable)
+ otherwise DetachedOrOutOfBounds;
+ if (index >= length) {
+ goto DetachedOrOutOfBounds;
+ }
+ this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
+ }
+
macro Load(implicit context: Context)(k: uintptr): JSAny {
const lf: LoadNumericFn = this.loadfn;
return lf(this.unstable, k);
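
Two patterns in this file are worth calling out: RecheckIndex folds the detached check and the bounds check into a single label, and GetTypedArrayAccessor maps the new RAB/GSAB elements kinds back onto their ordinary counterparts by subtracting the offset between the two enum ranges. A sketch of the latter with made-up kind values (the real numbering comes from V8's ElementsKind enum):

```cpp
#include <cassert>

// Hypothetical kind numbering, purely to illustrate the offset trick.
enum ElementsKind : int {
  kFirstFixedTypedArrayElementsKind = 10,         // e.g. UINT8_ELEMENTS
  kLastFixedTypedArrayElementsKind = 20,
  kFirstRabGsabFixedTypedArrayElementsKind = 21,  // e.g. RAB_GSAB_UINT8_ELEMENTS
};

// Folds a RAB/GSAB-backed kind onto the ordinary kind with the same element
// type, so one accessor table serves both ranges.
ElementsKind NormalizeForAccessor(ElementsKind kind) {
  if (kind >= kFirstRabGsabFixedTypedArrayElementsKind) {
    kind = static_cast<ElementsKind>(kind -
                                     kFirstRabGsabFixedTypedArrayElementsKind +
                                     kFirstFixedTypedArrayElementsKind);
  }
  assert(kind >= kFirstFixedTypedArrayElementsKind &&
         kind <= kLastFixedTypedArrayElementsKind);
  return kind;
}
```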
diff --git a/chromium/v8/src/builtins/wasm.tq b/chromium/v8/src/builtins/wasm.tq
index 7fc4a03e353..e4aea1446d6 100644
--- a/chromium/v8/src/builtins/wasm.tq
+++ b/chromium/v8/src/builtins/wasm.tq
@@ -160,7 +160,7 @@ builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
try {
- assert(IsValidPositiveSmi(tableIndex));
+ dcheck(IsValidPositiveSmi(tableIndex));
if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
const tables: FixedArray = LoadTablesFromInstance(instance);
@@ -193,7 +193,7 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
try {
- assert(IsValidPositiveSmi(tableIndex));
+ dcheck(IsValidPositiveSmi(tableIndex));
if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
const tables: FixedArray = LoadTablesFromInstance(instance);
@@ -364,16 +364,7 @@ builtin WasmArrayCopyWithChecks(
srcIndex + length > srcArray.length || srcIndex + length < srcIndex) {
tail ThrowWasmTrapArrayOutOfBounds();
}
- tail runtime::WasmArrayCopy(
- LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
- SmiFromUint32(srcIndex), SmiFromUint32(length));
-}
-
-// We put all uint32 parameters at the beginning so that they are assigned to
-// registers.
-builtin WasmArrayCopy(
- dstIndex: uint32, srcIndex: uint32, length: uint32, dstArray: WasmArray,
- srcArray: WasmArray): JSAny {
+ if (length == 0) return Undefined;
tail runtime::WasmArrayCopy(
LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
SmiFromUint32(srcIndex), SmiFromUint32(length));
@@ -381,7 +372,7 @@ builtin WasmArrayCopy(
// Redeclaration with different typing (value is an Object, not JSAny).
extern transitioning runtime
-CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object);
+CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object): void;
transitioning builtin WasmAllocateObjectWrapper(implicit context: Context)(
obj: Object): JSObject {
@@ -422,7 +413,7 @@ builtin UintPtr53ToNumber(value: uintptr): Number {
const valueFloat = ChangeUintPtrToFloat64(value);
// Values need to be within [0..2^53], such that they can be represented as
// float64.
- assert(ChangeFloat64ToUintPtr(valueFloat) == value);
+ dcheck(ChangeFloat64ToUintPtr(valueFloat) == value);
return AllocateHeapNumberWithValue(valueFloat);
}
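
WasmArrayCopyWithChecks keeps its overflow-aware bounds check (`srcIndex + length < srcIndex` catches uint32 wrap-around) and now returns early for zero-length copies instead of calling into the runtime. A standalone C++ version of the bounds predicate, assuming 32-bit unsigned indices as in the Torque code:

```cpp
#include <cstdint>

// True when [index, index + length) lies inside an array of `array_length`
// elements, detecting uint32 wrap-around explicitly.
bool CopyRangeInBounds(uint32_t index, uint32_t length, uint32_t array_length) {
  uint32_t end = index + length;  // May wrap around on overflow.
  if (end < index) return false;  // Wrapped: definitely out of bounds.
  return end <= array_length;
}
```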
diff --git a/chromium/v8/src/builtins/weak-ref.tq b/chromium/v8/src/builtins/weak-ref.tq
index 18385e52db3..56d3fc1c431 100644
--- a/chromium/v8/src/builtins/weak-ref.tq
+++ b/chromium/v8/src/builtins/weak-ref.tq
@@ -4,7 +4,8 @@
namespace runtime {
-extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver);
+extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver):
+ void;
} // namespace runtime
diff --git a/chromium/v8/src/builtins/x64/builtins-x64.cc b/chromium/v8/src/builtins/x64/builtins-x64.cc
index 14186e3be6d..6cc437b19b5 100644
--- a/chromium/v8/src/builtins/x64/builtins-x64.cc
+++ b/chromium/v8/src/builtins/x64/builtins-x64.cc
@@ -83,6 +83,35 @@ static void GenerateTailCallToReturnedCode(
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
+ Register counter = scratch;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ movq(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ movq(kScratchRegister, value);
+ value = Operand(kScratchRegister, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ decq(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
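
Generate_PushArguments walks the argument slots from the highest index down to 0, dereferences each slot first when it holds a handle, and drops the receiver slot from the count when kJSArgcIncludesReceiver is set. A plain C++ model of that loop, with illustrative types standing in for registers and operands (not the assembler code):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

enum class ArgumentsElementType {
  kRaw,    // Push the slot contents as-is.
  kHandle  // The slot holds a pointer; dereference it before pushing.
};

// `slots` models the caller's argument slots, `stack` the machine stack.
void PushArguments(const std::vector<std::uintptr_t>& slots, std::size_t argc,
                   bool argc_includes_receiver, ArgumentsElementType type,
                   std::vector<std::uintptr_t>* stack) {
  // When argc already counts the receiver slot, exclude it here; the
  // receiver is pushed separately by the caller.
  std::size_t count = argc_includes_receiver ? argc - 1 : argc;
  // Walk from the highest index down to 0, mirroring the decq /
  // j(greater_equal) loop in the builtin.
  for (std::size_t i = count; i-- > 0;) {
    std::uintptr_t value = slots[i];
    if (type == ArgumentsElementType::kHandle) {
      value = *reinterpret_cast<const std::uintptr_t*>(value);
    }
    stack->push_back(value);
  }
}
```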
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -112,7 +141,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -129,8 +160,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
@@ -236,7 +269,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r8);
@@ -279,8 +314,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -607,18 +644,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
- __ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r9);
@@ -651,6 +682,21 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -659,8 +705,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -736,7 +795,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
-
+ if (kJSArgcIncludesReceiver) {
+ __ decq(rcx);
+ }
__ LoadTaggedPointerField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -771,7 +832,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&ok);
__ bind(&is_baseline);
- __ CmpObjectType(rcx, BASELINE_DATA_TYPE, rcx);
+ __ CmpObjectType(rcx, CODET_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -862,7 +923,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Operand(rbp, StandardFrameConstants::kArgCOffset));
__ leaq(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1107,7 +1168,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o rax: actual argument count (not including the receiver)
+// o rax: actual argument count
// o rdi: the JS function object being called
// o rdx: the incoming new target or generator object
// o rsi: our context
@@ -1335,9 +1396,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(rcx,
- FieldOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(rcx, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(
masm, rcx, closure, kInterpreterBytecodeArrayRegister,
@@ -1374,7 +1433,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rbx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1387,7 +1446,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ decl(rax);
}
- __ leal(rcx, Operand(rax, 1)); // Add one for receiver.
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ leal(rcx, Operand(rax, argc_modification));
+ } else {
+ __ movl(rcx, rax);
+ }
// Add a stack check before pushing arguments.
__ StackOverflowCheck(rcx, &stack_overflow);
@@ -1395,11 +1462,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver.
- __ decq(rcx);
- }
-
// rbx and rdx will be modified.
GenerateInterpreterPushArgs(masm, rcx, rbx, rdx);
@@ -1439,7 +1501,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
@@ -1462,7 +1524,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
// rcx and r8 will be modified.
- GenerateInterpreterPushArgs(masm, rax, rcx, r8);
+ Register argc_without_receiver = rax;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r11;
+ __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
+ }
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
@@ -1761,7 +1828,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Push the baseline code return address now, as if it had been pushed by
// the call to this builtin.
__ PushReturnAddressFrom(return_address);
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameScope inner_frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(new_target);
__ SmiTag(frame_size);
@@ -1809,7 +1876,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. rax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movq(Operand(rsp, rax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
kScratchRegister);
}
__ movq(
@@ -1883,19 +1951,20 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movq(rbx, rdx);
__ movq(rdi, args[0]);
- __ testq(rax, rax);
- __ j(zero, &no_this_arg, Label::kNear);
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
{
__ movq(rdx, args[1]);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ movq(rbx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1923,7 +1992,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
- __ Move(rax, 0);
+ __ Move(rax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
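
The immediates 0, 1 and 3 in these argument-count comparisons are wrapped in JSParameterCount(n), which appears to fold the receiver slot into a JS-level parameter count when kJSArgcIncludesReceiver is set. A hedged sketch of what such a helper amounts to (the constants below are illustrative, not V8's definitions):

```cpp
// Build-time switch in V8; hard-coded here only for illustration.
constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// Converts a JS-level parameter count (receiver not counted) into the value
// expected in the argc register.
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}

static_assert(JSParameterCount(0) == kJSArgcReceiverSlots, "no explicit args");
```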
@@ -1937,7 +2006,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// rsp[8 * n] : Argument n-1
// rsp[8 * (n + 1)] : Argument n
- // rax contains the number of arguments, n, not counting the receiver.
+ // rax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1952,8 +2021,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ testq(rax, rax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ incq(rax);
__ bind(&done);
@@ -1989,18 +2063,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ movq(rdx, args[2]); // thisArgument
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2039,20 +2114,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
__ movq(rbx, args[2]); // argumentsList
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rdx, args[3]); // new.target
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2076,13 +2152,68 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2, kScratchRegister));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_rsp = pointer_to_new_space_out;
+ Register new_space = kScratchRegister;
+ __ movq(old_rsp, rsp);
+
+ __ leaq(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ Register copy_count = argc_in_out;
+ if (!kJSArgcIncludesReceiver) {
+ // We have a spare register, so use it instead of clobbering argc.
+ // lea + add (to add the count to argc in the end) uses 1 less byte than
+ // inc + lea (with base, index and disp), at the cost of 1 extra register.
+ copy_count = scratch1;
+ __ leaq(copy_count, Operand(argc_in_out, 1)); // Include the receiver.
+ }
+ Register current = scratch2;
+ Register value = kScratchRegister;
+
+ Label loop, entry;
+ __ Move(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(value, Operand(old_rsp, current, times_system_pointer_size, 0));
+ __ movq(Operand(rsp, current, times_system_pointer_size, 0), value);
+ __ incq(current);
+ __ bind(&entry);
+ __ cmpq(current, copy_count);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (copy_count + 1
+ // slot for the return address).
+ __ leaq(
+ pointer_to_new_space_out,
+ Operand(rsp, copy_count, times_system_pointer_size, kSystemPointerSize));
+ // We use addl instead of addq here because we can omit REX.W, saving 1 byte.
+ // We are especially constrained here because we are close to reaching the
+ // limit for a near jump to the stackoverflow label, so every byte counts.
+ __ addl(argc_in_out, count); // Update total number of arguments.
+}
+
+} // namespace
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- rdi : target
- // -- rax : number of parameters on the stack (not including the receiver)
+ // -- rax : number of parameters on the stack
// -- rbx : arguments list (a FixedArray)
// -- rcx : len (number of elements to push from args)
// -- rdx : new.target (for [[Construct]])
@@ -2114,28 +2245,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Push additional arguments onto the stack.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r8, dest = rsp, num = r9, current = r12;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
+ // rcx: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r8: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, rcx, rax, r8, r9, r12);
// Copy the additional arguments onto the stack.
{
Register value = r12;
@@ -2156,7 +2269,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ incl(current);
__ jmp(&loop);
__ bind(&done);
- __ addq(rax, current);
}
// Tail-call to the actual Call or Construct builtin.
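
Generate_AllocateSpaceAndShiftExistingArguments factors out the copy loop that both varargs builtins previously open-coded: grow the stack by `count` slots, slide the existing arguments (plus receiver and return address) into the new space, and report where the freed slots begin. A rough C++ model over a vector standing in for the stack (names and types are illustrative):

```cpp
#include <cstddef>
#include <vector>

// Models the stack as a vector whose element 0 is the slot at rsp. Reserves
// `count` new slots, slides the `existing` slots (arguments, receiver and
// return address) down to the new stack top, and returns the index of the
// first freed slot above them.
std::size_t AllocateSpaceAndShiftExistingArguments(std::vector<int>* stack,
                                                   std::size_t count,
                                                   std::size_t existing) {
  std::vector<int> old = *stack;
  stack->assign(old.size() + count, 0);  // AllocateStackSpace(count slots).
  for (std::size_t i = 0; i < existing; ++i) {
    (*stack)[i] = old[i];  // The copy loop: old_rsp + i -> rsp + i.
  }
  return existing;  // pointer_to_new_space_out: first free slot.
}
```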
@@ -2171,7 +2283,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (for [[Construct]] calls)
// -- rdi : the target to call (can be any Object)
// -- rcx : start index (to support rest parameters)
@@ -2197,12 +2309,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ decq(r8);
+ }
__ subl(r8, rcx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- rax : the number of arguments already in the stack
// -- rbp : point to the caller stack frame
// -- rcx : start index (to support rest parameters)
// -- rdx : the new target (for [[Construct]] calls)
@@ -2216,29 +2330,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r9, dest = rsp, num = r12, current = r15;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r9, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
- __ addl(rax, r8); // Update total number of arguments.
+ // r8: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, rax, r9, r12,
+ r15);
// Point to the first argument to copy (skipping receiver).
__ leaq(rcx, Operand(rcx, times_system_pointer_size,
@@ -2274,24 +2370,21 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(rax);
__ AssertFunction(rdi);
- // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
-
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
@@ -2308,7 +2401,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2365,7 +2458,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2373,7 +2466,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-
__ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
@@ -2389,7 +2481,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2403,7 +2495,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -- rcx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2467,7 +2559,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
@@ -2491,39 +2583,51 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the target to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rax);
+ Register argc = rax;
+ Register target = rdi;
+ Register map = rcx;
+ Register instance_type = rdx;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
- Label non_callable;
- __ JumpIfSmi(rdi, &non_callable);
- __ LoadMap(rcx, rdi);
- __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ StackArgumentsAccessor args(argc);
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, below_equal);
- __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, equal);
// Check if target has a [[Call]] internal method.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsCallableBit::kMask));
__ j(zero, &non_callable, Label::kNear);
// Check if target is a proxy and call CallProxy external builtin
- __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET,
equal);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ j(equal, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ movq(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), target);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(rdi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2532,15 +2636,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2566,7 +2680,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2595,45 +2709,53 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rax);
+ Register argc = rax;
+ Register target = rdi;
+ Register map = rcx;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ StackArgumentsAccessor args(argc);
// Check if target is a Smi.
Label non_constructor;
- __ JumpIfSmi(rdi, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadMap(rcx, rdi);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ LoadMap(map, target);
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ __ CmpInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, below_equal);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, equal);
// Only dispatch to proxies after checking whether they are constructors.
- __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET,
equal);
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ movq(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), target);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(rdi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -2676,8 +2798,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ LoadTaggedPointerField(rbx,
- FieldOperand(rax, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntagField(
@@ -2876,6 +2998,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Put the in_parameter count on the stack, we only need it at the very end
// when we pop the parameters off the stack.
Register in_param_count = rax;
+ if (kJSArgcIncludesReceiver) {
+ __ decq(in_param_count);
+ }
__ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
in_param_count = no_reg;
@@ -3691,12 +3816,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
ExternalReference c_entry_fp_address = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
@@ -4384,7 +4503,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4397,16 +4516,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
// Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r11);
+ }
// Load the feedback vector.
Register feedback_vector = r11;
diff --git a/chromium/v8/src/codegen/OWNERS b/chromium/v8/src/codegen/OWNERS
index 364d34fb092..6644faa7fb2 100644
--- a/chromium/v8/src/codegen/OWNERS
+++ b/chromium/v8/src/codegen/OWNERS
@@ -8,9 +8,6 @@ jkummerow@chromium.org
leszeks@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
neis@chromium.org
nicohartmann@chromium.org
-rmcilroy@chromium.org
-solanes@chromium.org
zhin@chromium.org
diff --git a/chromium/v8/src/codegen/arm/assembler-arm-inl.h b/chromium/v8/src/codegen/arm/assembler-arm-inl.h
index f72e27703e9..2c0e69a753b 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm-inl.h
@@ -101,7 +101,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.cc b/chromium/v8/src/codegen/arm/assembler-arm.cc
index 970386be72c..b49d9ed186d 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/assembler-arm.cc
@@ -4786,7 +4786,7 @@ static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
- // Dd = vpadd(Dn, Dm) SIMD integer pairwise ADD.
+ // Dd = vpadd(Dn, Dm) SIMD floating point pairwise ADD.
// Instruction details available in ARM DDI 0406C.b, A8-982.
int vd, d;
dst.split_code(&vd, &d);
@@ -5472,8 +5472,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (!entry.is_merged()) {
if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
int offset = pc_offset();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, entry.value()));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, entry.value());
Handle<HeapObject> object(reinterpret_cast<Address*>(entry.value()));
emit(object->ptr());
DCHECK(EmbeddedObjectMatches(offset, object));
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.h b/chromium/v8/src/codegen/arm/assembler-arm.h
index 4a9fe49685d..a34b9e1b662 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm.h
@@ -1067,7 +1067,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
- Assembler* assem_;
+ Assembler* const assem_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
@@ -1250,6 +1250,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool has_pending_constants() const {
+ bool result = !pending_32_bit_constants_.empty();
+ DCHECK_EQ(result, first_const_pool_32_use_ != -1);
+ return result;
+ }
+
bool VfpRegisterIsAvailable(DwVfpRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
index 26d16406a62..aebfaab9320 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -182,7 +182,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// size s.t. pc-relative calls may be used.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- int offset = IsolateData::builtin_entry_slot_offset(code->builtin_id());
+ int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
ldr(scratch, MemOperand(kRootRegister, offset));
Jump(scratch, cond);
return;
@@ -269,7 +269,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
// This branch is taken only for specific cctests, where we force isolate
// creation at runtime. At this point, Code space isn't restricted to a
// size s.t. pc-relative calls may be used.
- int offset = IsolateData::builtin_entry_slot_offset(code->builtin_id());
+ int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
ldr(ip, MemOperand(kRootRegister, offset));
Call(ip, cond);
return;
@@ -315,7 +315,7 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
@@ -343,29 +343,32 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
DCHECK(root_array_available());
Label if_code_is_off_heap, out;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- b(ne, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call
+ // its (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ b(ne, &if_code_is_off_heap);
+
+ // Not an off-heap trampoline, the entry point is at
+ // Code::raw_instruction_start().
+ add(destination, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin
+ // entry table.
+ bind(&if_code_is_off_heap);
+ ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ }
add(destination, destination, kRootRegister);
ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -1639,8 +1642,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- b(eq, &regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ b(eq, &regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1669,7 +1674,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
sub(num, num, Operand(1), SetCC);
bind(&check);
- b(ge, &copy);
+ if (kJSArgcIncludesReceiver) {
+ b(gt, &copy);
+ } else {
+ b(ge, &copy);
+ }
}
// Fill remaining expected arguments with undefined values.
@@ -1685,8 +1694,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1700,7 +1709,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
ldr(r4, ReceiverOperand(actual_parameter_count));
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1867,16 +1877,26 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmp(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ sub(scratch, value, Operand(lower_limit));
+ cmp(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ cmp(value, Operand(higher_limit));
+ }
+}
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, type_reg, Operand(lower_limit));
- cmp(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1891,14 +1911,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
ASM_CODE_COMMENT(this);
- if (lower_limit != 0) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- sub(scratch, value, Operand(lower_limit));
- cmp(scratch, Operand(higher_limit - lower_limit));
- } else {
- cmp(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
b(ls, on_in_range);
}
@@ -2082,7 +2095,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r1);
Move(r1, ExternalReference::abort_with_reason());
@@ -2098,7 +2111,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2660,17 +2673,19 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
+
+ // All constants should have been emitted prior to deoptimization exit
+ // emission. See PrepareForDeoptimizationExits.
+ DCHECK(!has_pending_constants());
BlockConstPoolScope block_const_pool(this);
- ldr(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+
+ CHECK_LE(target, Builtins::kLastTier0);
+ ldr(ip,
+ MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
@@ -2682,6 +2697,9 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
Deoptimizer::kEagerWithResumeBeforeArgsSize);
}
+
+ // The above code must not emit constants either.
+ DCHECK(!has_pending_constants());
}
void TurboAssembler::Trap() { stop(); }
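(Note on the CompareRange helper introduced above: it uses the standard unsigned-subtraction range check — a value v lies in [lo, hi] iff the unsigned result of v - lo is at most hi - lo, which is exactly what the emitted sub/cmp pair plus the "ls" condition tests. A minimal standalone sketch of the same idea in plain C++; the helper name here is illustrative, not part of V8:

#include <cstdint>

// Returns true when value lies in [lower_limit, higher_limit] using a
// single unsigned comparison -- the trick CompareRange emits as sub/cmp
// followed by a branch on the "ls" condition.
static bool IsInRange(uint32_t value, uint32_t lower_limit,
                      uint32_t higher_limit) {
  // When lower_limit is 0 the subtraction is a no-op, mirroring the
  // assembler's fast path that skips the scratch register entirely.
  return (value - lower_limit) <= (higher_limit - lower_limit);
}
)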
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.h b/chromium/v8/src/codegen/arm/macro-assembler-arm.h
index 41bc5ec5443..3dc3e208f59 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.h
@@ -518,6 +518,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
mov(dst, Operand::SmiUntag(src), s);
}
+ void SmiToInt32(Register smi) { SmiUntag(smi); }
+
// Load an object from the root table.
void LoadRoot(Register destination, RootIndex index) final {
LoadRoot(destination, index, al);
@@ -560,8 +562,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -757,7 +757,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags C=0 or Z=1 indicate the value is in the range (condition
+ // ls).
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
diff --git a/chromium/v8/src/codegen/arm/register-arm.h b/chromium/v8/src/codegen/arm/register-arm.h
index 6608ad4edeb..8cc838945d2 100644
--- a/chromium/v8/src/codegen/arm/register-arm.h
+++ b/chromium/v8/src/codegen/arm/register-arm.h
@@ -336,7 +336,6 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
index 2668502f816..41d07b10b18 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -670,10 +670,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 8986df823af..48b8a5f06a4 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1091,6 +1091,19 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+void TurboAssembler::SmiToInt32(Register smi) {
+ DCHECK(smi.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ Asr(smi.W(), smi.W(), kSmiShift);
+ } else {
+ Lsr(smi, smi, kSmiShift);
+ }
+}
+
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
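(Note on SmiToInt32 above: with pointer compression the 31-bit Smi payload sits in the low 32-bit word and is recovered with an arithmetic shift, otherwise the 32-bit payload sits in the upper half of the 64-bit word and a logical shift suffices. A rough C++ model of that decoding, assuming the usual kSmiShift values of 1 and 32; this is an illustration, not V8's implementation:

#include <cstdint>

// Illustrative model of Smi decoding on arm64.
static int32_t DecodeSmi(uint64_t raw, bool compress_pointers) {
  if (compress_pointers) {
    // 31-bit Smi: payload in the low 32 bits, tag bit at bit 0.
    return static_cast<int32_t>(static_cast<uint32_t>(raw)) >> 1;
  }
  // 32-bit Smi: payload occupies the upper 32 bits of the word.
  return static_cast<int32_t>(raw >> 32);
}
)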
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
index ef95b4e8132..91d972ea000 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -256,8 +256,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
bool invert_move = false;
// If the number of 0xFFFF halfwords is greater than the number of 0x0000
// halfwords, it's more efficient to use move-inverted.
- if (CountClearHalfWords(~imm, reg_size) >
- CountClearHalfWords(imm, reg_size)) {
+ if (CountSetHalfWords(imm, reg_size) > CountSetHalfWords(~imm, reg_size)) {
ignored_halfword = 0xFFFFL;
invert_move = true;
}
@@ -560,23 +559,27 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
}
}
-unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- DCHECK_EQ(reg_size % 8, 0);
- int count = 0;
- for (unsigned i = 0; i < (reg_size / 16); i++) {
- if ((imm & 0xFFFF) == 0) {
- count++;
- }
- imm >>= 16;
+unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
+ DCHECK_EQ(reg_size % 16, 0);
+
+#define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u)
+ switch (reg_size / 16) {
+ case 1:
+ return HALFWORD(0);
+ case 2:
+ return HALFWORD(0) + HALFWORD(1);
+ case 4:
+ return HALFWORD(0) + HALFWORD(1) + HALFWORD(2) + HALFWORD(3);
}
- return count;
+#undef HALFWORD
+ UNREACHABLE();
}
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
- return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+ return CountSetHalfWords(imm, reg_size) <= 1;
}
// The movn instruction can generate immediates containing an arbitrary 16-bit
@@ -1516,9 +1519,7 @@ void MacroAssembler::AssertCodeT(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareObjectType(
- object, temp, temp,
- V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE : CODE_TYPE);
+ CompareObjectType(object, temp, temp, CODET_TYPE);
Check(eq, AbortReason::kOperandIsNotACodeT);
}
@@ -1692,6 +1693,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
}
static const int kRegisterPassedArguments = 8;
+static const int kFPRegisterPassedArguments = 8;
void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int num_of_double_args) {
@@ -1699,17 +1701,6 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
- // If we're passing doubles, we're limited to the following prototypes
- // (defined by ExternalReference::Type):
- // BUILTIN_COMPARE_CALL: int f(double, double)
- // BUILTIN_FP_FP_CALL: double f(double, double)
- // BUILTIN_FP_CALL: double f(double)
- // BUILTIN_FP_INT_CALL: double f(double, int)
- if (num_of_double_args > 0) {
- DCHECK_LE(num_of_reg_args, 1);
- DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
- }
-
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
Register pc_scratch = x4;
@@ -1760,6 +1751,13 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
Drop(claim_slots);
}
+
+ if (num_of_double_args > kFPRegisterPassedArguments) {
+ // Drop the register passed arguments.
+ int claim_slots =
+ RoundUp(num_of_double_args - kFPRegisterPassedArguments, 2);
+ Drop(claim_slots);
+ }
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -1846,8 +1844,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (RelocInfo::IsRuntimeEntry(rmode) && IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
JumpHelper(offset, rmode, cond);
@@ -1895,8 +1892,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (IsOnHeap() && RelocInfo::IsRuntimeEntry(rmode)) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
DCHECK(IsNearCallOffset(offset));
@@ -1969,7 +1965,7 @@ MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -2045,9 +2041,9 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
- Ldrsw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- Tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- B(ne, &if_code_is_off_heap);
+ Ldr(scratch.W(), FieldMemOperand(code_object, Code::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch.W(), Code::IsOffHeapTrampoline::kMask,
+ &if_code_is_off_heap);
// Not an off-heap trampoline object, the entry point is at
// Code::raw_instruction_start().
@@ -2058,8 +2054,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_off_heap);
Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- Lsl(destination, scratch, kSystemPointerSizeLog2);
- Add(destination, destination, kRootRegister);
+ Add(destination, kRootRegister,
+ Operand(scratch, LSL, kSystemPointerSizeLog2));
Ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -2092,8 +2088,12 @@ void TurboAssembler::LoadCodeDataContainerEntry(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Ldr(destination, FieldMemOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
+
+ LoadExternalPointerField(
+ destination,
+ FieldMemOperand(code_data_container_object,
+ CodeDataContainer::kCodeEntryPointOffset),
+ kCodeEntryPointTag);
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
@@ -2263,8 +2263,10 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
// If the formal parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- B(eq, &regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ B(eq, &regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -2281,7 +2283,11 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register slots_to_copy = x4;
Register slots_to_claim = x5;
- Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ Mov(slots_to_copy, actual_argument_count);
+ } else {
+ Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ }
Mov(slots_to_claim, extra_argument_count);
Tbz(extra_argument_count, 0, &even_extra_count);
@@ -2295,7 +2301,9 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register scratch = x11;
Add(slots_to_claim, extra_argument_count, 1);
And(scratch, actual_argument_count, 1);
- Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ Eor(scratch, scratch, 1);
+ }
Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2316,10 +2324,13 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
}
Bind(&skip_move);
- Register actual_argument_with_receiver = x4;
+ Register actual_argument_with_receiver = actual_argument_count;
Register pointer_next_value = x5;
- Add(actual_argument_with_receiver, actual_argument_count,
- 1); // {slots_to_copy} was scratched.
+ if (!kJSArgcIncludesReceiver) {
+ actual_argument_with_receiver = x4;
+ Add(actual_argument_with_receiver, actual_argument_count,
+ 1); // {slots_to_copy} was scratched.
+ }
// Copy extra arguments as undefined values.
{
@@ -2349,8 +2360,8 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
Unreachable();
}
@@ -2364,7 +2375,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
Peek(x4, ReceiverOperand(actual_parameter_count));
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
if (!new_target.is_valid()) new_target = padreg;
@@ -2919,6 +2931,18 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
+void TurboAssembler::AtomicStoreTaggedField(const Register& value,
+ const Register& dst_base,
+ const Register& dst_index,
+ const Register& temp) {
+ Add(temp, dst_base, dst_index);
+ if (COMPRESS_POINTERS_BOOL) {
+ Stlr(value.W(), temp);
+ } else {
+ Stlr(value, temp);
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
@@ -2950,6 +2974,40 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
Add(destination, kPtrComprCageBaseRegister, destination);
}
+void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ if (FLAG_debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add(destination, destination,
+ ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+ }
+}
+
+void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
+void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
@@ -3005,6 +3063,34 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Bind(&done);
}
+void TurboAssembler::LoadExternalPointerField(Register destination,
+ MemOperand field_operand,
+ ExternalPointerTag tag,
+ Register isolate_root) {
+ DCHECK(!AreAliased(destination, isolate_root));
+ ASM_CODE_COMMENT(this);
+#ifdef V8_HEAP_SANDBOX
+ UseScratchRegisterScope temps(this);
+ Register external_table = temps.AcquireX();
+ if (isolate_root == no_reg) {
+ DCHECK(root_array_available_);
+ isolate_root = kRootRegister;
+ }
+ Ldr(external_table,
+ MemOperand(isolate_root,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ Ldr(destination, field_operand);
+ Ldr(destination,
+ MemOperand(external_table, destination, LSL, kSystemPointerSizeLog2));
+ if (tag != 0) {
+ And(destination, destination, Immediate(~tag));
+ }
+#else
+ Ldr(destination, field_operand);
+#endif // V8_HEAP_SANDBOX
+}
+
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
if (registers == 0) return;
ASM_CODE_COMMENT(this);
@@ -3223,7 +3309,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Mov(w0, static_cast<int>(reason));
Call(ExternalReference::abort_with_reason());
return;
@@ -3237,7 +3323,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -3540,10 +3626,6 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
adr(rd, -pc_offset());
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- Mov(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::RestoreFPAndLR() {
static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
StandardFrameConstants::kCallerPCOffset,
@@ -3575,6 +3657,16 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+void TurboAssembler::PopcntHelper(Register dst, Register src) {
+ UseScratchRegisterScope temps(this);
+ VRegister scratch = temps.AcquireV(kFormat8B);
+ VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D();
+ Fmov(tmp, src);
+ Cnt(scratch, scratch);
+ Addv(scratch.B(), scratch);
+ Fmov(dst, tmp);
+}
+
void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
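(Note on the CountSetHalfWords rewrite above: it counts the 16-bit halfwords of the immediate that contain any set bit, and IsImmMovz then reduces to "at most one non-zero halfword". A small equivalent sketch in plain C++, for illustration only:

#include <cstdint>

// Counts the non-zero 16-bit halfwords of imm within the low reg_size bits.
static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size) {
  unsigned count = 0;
  for (unsigned i = 0; i < reg_size / 16; ++i) {
    if ((imm >> (i * 16)) & 0xFFFF) ++count;
  }
  return count;
}

// movz can materialize an immediate with at most one non-zero halfword.
static bool IsImmMovz(uint64_t imm, unsigned reg_size) {
  return CountSetHalfWords(imm, reg_size) <= 1;
}
)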
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
index 9128ba2c18e..8f60217d9e9 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -559,6 +559,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
+ inline void SmiToInt32(Register smi);
+
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, AbortReason reason);
@@ -643,7 +645,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define a jump/call target and bind a label.
inline void BindJumpOrCallTarget(Label* label);
- static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+ static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);
CPURegList* TmpList() { return &tmp_list_; }
CPURegList* FPTmpList() { return &fptmp_list_; }
@@ -1192,6 +1194,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
+ void St1(const VRegister& vt, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt, int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, lane, dst);
+ }
+
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
V(rshrn, Rshrn) \
V(rshrn2, Rshrn2) \
@@ -1347,8 +1372,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(const Register& rd);
- void ResetSpeculationPoisonRegister();
-
// ---------------------------------------------------------------------------
// Pointer compression Support
@@ -1373,6 +1396,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
+ void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
+ const Register& dst_index, const Register& temp);
+
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
@@ -1382,6 +1408,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
+ void AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+ void AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp);
+ void AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
void RestoreFPAndLR();
@@ -1390,12 +1427,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreReturnAddressInWasmExitFrame(Label* return_location);
#endif // V8_ENABLE_WEBASSEMBLY
- // Wasm SIMD helpers. These instructions don't have direct lowering to native
- // instructions. These helpers allow us to define the optimal code sequence,
- // and be used in both TurboFan and Liftoff.
+ // Wasm helpers. These instructions don't have direct lowering
+ // to native instructions. These helpers allow us to define the optimal code
+ // sequence, and be used in both TurboFan and Liftoff.
+ void PopcntHelper(Register dst, Register src);
void I64x2BitMask(Register dst, VRegister src);
void I64x2AllTrue(Register dst, VRegister src);
+ // ---------------------------------------------------------------------------
+ // V8 Heap sandbox support
+
+ // Loads a field containing off-heap pointer and does necessary decoding
+ // if V8 heap sandbox is enabled.
+ void LoadExternalPointerField(Register destination, MemOperand field_operand,
+ ExternalPointerTag tag,
+ Register isolate_root = Register::no_reg());
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1645,28 +1692,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DCHECK(allow_macro_instructions());
ld4r(vt, vt2, vt3, vt4, src);
}
- void St1(const VRegister& vt, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const VRegister& vt4, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, vt4, dst);
- }
- void St1(const VRegister& vt, int lane, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, lane, dst);
- }
void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
DCHECK(allow_macro_instructions());
st2(vt, vt2, dst);
diff --git a/chromium/v8/src/codegen/arm64/register-arm64.h b/chromium/v8/src/codegen/arm64/register-arm64.h
index 21007a5973f..29a4212aacf 100644
--- a/chromium/v8/src/codegen/arm64/register-arm64.h
+++ b/chromium/v8/src/codegen/arm64/register-arm64.h
@@ -699,8 +699,6 @@ constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;
-constexpr Register kSpeculationPoisonRegister = x23;
-
constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;
diff --git a/chromium/v8/src/codegen/assembler-arch.h b/chromium/v8/src/codegen/assembler-arch.h
index 3569644e529..2e1b56c467e 100644
--- a/chromium/v8/src/codegen/assembler-arch.h
+++ b/chromium/v8/src/codegen/assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/codegen/assembler-inl.h b/chromium/v8/src/codegen/assembler-inl.h
index c04b6d96879..084f12cc7ef 100644
--- a/chromium/v8/src/codegen/assembler-inl.h
+++ b/chromium/v8/src/codegen/assembler-inl.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/codegen/assembler.cc b/chromium/v8/src/codegen/assembler.cc
index dfd406694a9..cacbfbd679f 100644
--- a/chromium/v8/src/codegen/assembler.cc
+++ b/chromium/v8/src/codegen/assembler.cc
@@ -248,6 +248,12 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
if (!buffer_) buffer_ = NewAssemblerBuffer(kDefaultBufferSize);
buffer_start_ = buffer_->start();
pc_ = buffer_start_;
+ if (IsOnHeap()) {
+ saved_handles_for_raw_object_ptr_.reserve(
+ kSavedHandleForRawObjectsInitialSize);
+ saved_offsets_for_runtime_entries_.reserve(
+ kSavedOffsetForRuntimeEntriesInitialSize);
+ }
}
AssemblerBase::~AssemblerBase() = default;
diff --git a/chromium/v8/src/codegen/assembler.h b/chromium/v8/src/codegen/assembler.h
index 7373b5d48b0..f1e5b85f1f6 100644
--- a/chromium/v8/src/codegen/assembler.h
+++ b/chromium/v8/src/codegen/assembler.h
@@ -276,8 +276,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // Mips needs it's own implementation to avoid trampoline's influence.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+ // MIPS and LOONG need to use their own implementation to avoid trampoline's
+ // influence.
UNREACHABLE();
#else
return pc_offset();
@@ -418,6 +420,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
CodeCommentsWriter code_comments_writer_;
// Relocation information when code allocated directly on heap.
+ // These constants correspond to the 99% percentile of a selected number of JS
+ // frameworks and benchmarks, including jquery, lodash, d3 and speedometer3.
+ const int kSavedHandleForRawObjectsInitialSize = 60;
+ const int kSavedOffsetForRuntimeEntriesInitialSize = 100;
std::vector<std::pair<uint32_t, Address>> saved_handles_for_raw_object_ptr_;
std::vector<std::pair<uint32_t, uint32_t>> saved_offsets_for_runtime_entries_;
diff --git a/chromium/v8/src/codegen/atomic-memory-order.h b/chromium/v8/src/codegen/atomic-memory-order.h
new file mode 100644
index 00000000000..fc56cd34e34
--- /dev/null
+++ b/chromium/v8/src/codegen/atomic-memory-order.h
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+#define V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+
+#include <ostream>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// Atomic memory orders supported by the compiler.
+enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };
+
+inline size_t hash_value(AtomicMemoryOrder order) {
+ return static_cast<uint8_t>(order);
+}
+
+inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
+ switch (order) {
+ case AtomicMemoryOrder::kAcqRel:
+ return os << "kAcqRel";
+ case AtomicMemoryOrder::kSeqCst:
+ return os << "kSeqCst";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
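(Note on the new atomic-memory-order.h header above: the enum is meant to be streamed and hashed via the free functions it declares. A short usage sketch, assuming it is compiled inside the V8 tree so the include path resolves; the surrounding main() is hypothetical:

#include <iostream>
#include "src/codegen/atomic-memory-order.h"

int main() {
  v8::internal::AtomicMemoryOrder order =
      v8::internal::AtomicMemoryOrder::kSeqCst;
  // operator<< prints the enumerator name; hash_value is found via ADL and
  // can feed V8's hash combiners.
  std::cout << order << " hash=" << hash_value(order) << "\n";
  return 0;
}
)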
diff --git a/chromium/v8/src/codegen/code-factory.cc b/chromium/v8/src/codegen/code-factory.cc
index f3cb604478c..dcf19a0ad51 100644
--- a/chromium/v8/src/codegen/code-factory.cc
+++ b/chromium/v8/src/codegen/code-factory.cc
@@ -378,24 +378,47 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
-Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore8IgnoreFP
- : Builtin::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore16IgnoreFP
- : Builtin::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore32IgnoreFP
- : Builtin::kTSANRelaxedStore32SaveFP;
+Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore8IgnoreFP
+ : Builtin::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore16IgnoreFP
+ : Builtin::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore32IgnoreFP
+ : Builtin::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore64IgnoreFP
+ : Builtin::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore64IgnoreFP
- : Builtin::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore8IgnoreFP
+ : Builtin::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore16IgnoreFP
+ : Builtin::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore32IgnoreFP
+ : Builtin::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore64IgnoreFP
+ : Builtin::kTSANSeqCstStore64SaveFP;
+ }
}
}
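(Note on the expanded GetTSANStoreStub above: it now selects a builtin by store width, FP-save mode and memory order rather than assuming relaxed stores. A hedged call-site example, valid only in V8_IS_TSAN builds; the choice of arguments is illustrative:

#ifdef V8_IS_TSAN
// Picks the seq_cst 32-bit store stub that preserves FP registers
// (Builtin::kTSANSeqCstStore32SaveFP per the dispatch above).
Builtin stub = CodeFactory::GetTSANStoreStub(
    SaveFPRegsMode::kSave, kInt32Size, std::memory_order_seq_cst);
#endif  // V8_IS_TSAN
)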
diff --git a/chromium/v8/src/codegen/code-factory.h b/chromium/v8/src/codegen/code-factory.h
index 4780678dadc..05b27bef0ea 100644
--- a/chromium/v8/src/codegen/code-factory.h
+++ b/chromium/v8/src/codegen/code-factory.h
@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
- static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+ static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif // V8_IS_TSAN
};
diff --git a/chromium/v8/src/codegen/code-stub-assembler.cc b/chromium/v8/src/codegen/code-stub-assembler.cc
index e25135decee..e61933b05ac 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.cc
+++ b/chromium/v8/src/codegen/code-stub-assembler.cc
@@ -68,7 +68,7 @@ void CodeStubAssembler::HandleBreakOnNode() {
BreakOnNode(node_id);
}
-void CodeStubAssembler::Assert(const BranchGenerator& branch,
+void CodeStubAssembler::Dcheck(const BranchGenerator& branch,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -78,7 +78,7 @@ void CodeStubAssembler::Assert(const BranchGenerator& branch,
#endif
}
-void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
+void CodeStubAssembler::Dcheck(const NodeGenerator<BoolT>& condition_body,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -88,7 +88,7 @@ void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
#endif
}
-void CodeStubAssembler::Assert(TNode<Word32T> condition_node,
+void CodeStubAssembler::Dcheck(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -196,7 +196,7 @@ void CodeStubAssembler::FailAssert(
}
#endif
- AbortCSAAssert(message_node);
+ AbortCSADcheck(message_node);
Unreachable();
}
@@ -315,7 +315,7 @@ bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(
TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(
TNode<IntPtrT> value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
- CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
value = Signed(IntPtrSub(value, IntPtrConstant(1)));
for (int i = 1; i <= 16; i *= 2) {
value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i))));
@@ -754,7 +754,7 @@ TNode<Smi> CodeStubAssembler::SmiFromInt32(TNode<Int32T> value) {
}
TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) {
- CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value),
+ CSA_DCHECK(this, IntPtrLessThan(ChangeUint32ToWord(value),
IntPtrConstant(Smi::kMaxValue)));
return SmiFromInt32(Signed(value));
}
@@ -1234,8 +1234,9 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TVARIABLE(Object, result);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
- bool needs_double_alignment = flags & kDoubleAlignment;
- bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
+ bool needs_double_alignment = flags & AllocationFlag::kDoubleAlignment;
+ bool allow_large_object_allocation =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
if (allow_large_object_allocation) {
Label next(this);
@@ -1281,7 +1282,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
- if (flags & kPretenured) {
+ if (flags & AllocationFlag::kPretenured) {
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
@@ -1333,7 +1334,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
- DCHECK_EQ(flags & kDoubleAlignment, 0);
+ DCHECK_EQ(flags & AllocationFlag::kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
@@ -1341,8 +1342,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
- return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags | AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -1351,8 +1352,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
- return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags & ~AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#else
#error Architecture not supported
#endif
@@ -1360,17 +1361,19 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags) {
- DCHECK(flags == kNone || flags == kDoubleAlignment);
- CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
+ DCHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
+ CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
- if (FLAG_single_generation) flags |= kPretenured;
- bool const new_space = !(flags & kPretenured);
- bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
+ if (FLAG_single_generation) flags |= AllocationFlag::kPretenured;
+ bool const new_space = !(flags & AllocationFlag::kPretenured);
+ bool const allow_large_objects =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
// different generation than requested.
bool const always_allocated_in_requested_space =
@@ -1380,10 +1383,11 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
if (TryToIntPtrConstant(size_in_bytes, &size_constant)) {
CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
} else {
- CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
+ CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
- if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
+ if (!(flags & AllocationFlag::kDoubleAlignment) &&
+ always_allocated_in_requested_space) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
@@ -1400,14 +1404,14 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
// kNullAddress.
if (ExternalReference::new_space_allocation_top_address(isolate())
.address() != kNullAddress) {
- Address top_address =
+ Address raw_top_address =
ExternalReference::new_space_allocation_top_address(isolate())
.address();
- Address limit_address =
+ Address raw_limit_address =
ExternalReference::new_space_allocation_limit_address(isolate())
.address();
- CHECK_EQ(kSystemPointerSize, limit_address - top_address);
+ CHECK_EQ(kSystemPointerSize, raw_limit_address - raw_top_address);
}
DCHECK_EQ(kSystemPointerSize,
@@ -1421,7 +1425,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
IntPtrConstant(kSystemPointerSize));
- if (flags & kDoubleAlignment) {
+ if (flags & AllocationFlag::kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags,
ReinterpretCast<RawPtrT>(top_address),
ReinterpretCast<RawPtrT>(limit_address));
@@ -1434,7 +1438,8 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
AllocationFlags flags) {
- CHECK(flags == kNone || flags == kDoubleAlignment);
+ CHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
@@ -1678,7 +1683,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
TNode<HeapObject> object) {
- CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
+ CSA_DCHECK(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset);
}
@@ -1694,7 +1699,7 @@ TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);
#ifdef V8_MAP_PACKING
// Check the loaded map is unpacked. i.e. the lowest two bits != 0b10
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(WordAnd(BitcastTaggedToWord(map),
IntPtrConstant(Internals::kMapWordXorMask)),
IntPtrConstant(Internals::kMapWordSignature)));
@@ -1732,7 +1737,7 @@ TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) {
Map::Bits1::IsAccessCheckNeededBit::kMask;
USE(mask);
// Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
is_special, Int32TrueConstant()));
return is_special;
@@ -1754,7 +1759,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
TNode<JSReceiver> object) {
- CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
+ CSA_SLOW_DCHECK(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); },
@@ -1763,7 +1768,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
TNode<JSReceiver> object) {
- CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+ CSA_SLOW_DCHECK(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
NodeGenerator<HeapObject> make_empty = [=]() -> TNode<HeapObject> {
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
@@ -1775,10 +1780,10 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
NodeGenerator<HeapObject> cast_properties = [=] {
TNode<HeapObject> dict = CAST(properties);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(dict),
+ CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(dict),
IsGlobalDictionary(dict)));
} else {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(IsNameDictionary(dict), IsGlobalDictionary(dict)));
}
return dict;
@@ -1789,7 +1794,7 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
TNode<Context> context, TNode<JSArgumentsObject> array) {
- CSA_ASSERT(this, IsJSArgumentsObjectWithLength(context, array));
+ CSA_DCHECK(this, IsJSArgumentsObjectWithLength(context, array));
constexpr int offset = JSStrictArgumentsObject::kLengthOffset;
STATIC_ASSERT(offset == JSSloppyArgumentsObject::kLengthOffset);
return LoadObjectField(array, offset);
@@ -1797,19 +1802,19 @@ TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) {
TNode<Number> length = LoadJSArrayLength(array);
- CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
+ CSA_DCHECK(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
IsElementsKindInRange(
LoadElementsKind(array),
FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND,
LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
// JSArray length is always a positive Smi for fast arrays.
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
return CAST(length);
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
TNode<FixedArrayBase> array) {
- CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array));
+ CSA_SLOW_DCHECK(this, IsNotWeakFixedArraySubclass(array));
return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset);
}
@@ -1889,7 +1894,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) {
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
TNode<Map> map) {
// See Map::GetInObjectPropertiesStartInWords() for details.
- CSA_ASSERT(this, IsJSObjectMap(map));
+ CSA_DCHECK(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
@@ -1897,7 +1902,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
TNode<Map> map) {
// See Map::GetConstructorFunctionIndex() for details.
- CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
+ CSA_DCHECK(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
@@ -2020,7 +2025,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
TNode<Uint32T> CodeStubAssembler::LoadNameHashAssumeComputed(TNode<Name> name) {
TNode<Uint32T> hash_field = LoadNameRawHashField(name);
- CSA_ASSERT(this, IsClearWord32(hash_field, Name::kHashNotComputedMask));
+ CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask));
return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
}
@@ -2076,10 +2081,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
-void CodeStubAssembler::AssertHasValidMap(TNode<HeapObject> object) {
+void CodeStubAssembler::DcheckHasValidMap(TNode<HeapObject> object) {
#ifdef V8_MAP_PACKING
// Test if the map is an unpacked and valid map
- CSA_ASSERT(this, IsMap(LoadMap(object)));
+ CSA_DCHECK(this, IsMap(LoadMap(object)));
#endif
}
@@ -2110,8 +2115,8 @@ TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
TNode<MaybeObject> value) {
- CSA_ASSERT(this, IsWeakOrCleared(value));
- CSA_ASSERT(this, IsNotCleared(value));
+ CSA_DCHECK(this, IsWeakOrCleared(value));
+ CSA_DCHECK(this, IsNotCleared(value));
return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd(
BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask))));
}
@@ -2128,7 +2133,7 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
// but requires a big constant for ~mask.
TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject(
TNode<MaybeObject> maybe_object, TNode<Object> value) {
- CSA_ASSERT(this, TaggedIsNotSmi(maybe_object));
+ CSA_DCHECK(this, TaggedIsNotSmi(maybe_object));
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(
Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
@@ -2193,70 +2198,62 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
}
template <typename Array, typename TIndex, typename TValue>
-TNode<TValue> CodeStubAssembler::LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index_node,
- int additional_offset, LoadSensitivity needs_poisoning) {
+TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
+ int array_header_size,
+ TNode<TIndex> index_node,
+ int additional_offset) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT indices are allowed");
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node),
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node),
IntPtrConstant(0)));
DCHECK(IsAligned(additional_offset, kTaggedSize));
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
+ CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
constexpr MachineType machine_type = MachineTypeOf<TValue>::value;
- // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
- if (needs_poisoning == LoadSensitivity::kSafe) {
- return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
- } else {
- return UncheckedCast<TValue>(
- Load(machine_type, array, offset, needs_poisoning));
- }
+ return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
}
template V8_EXPORT_PRIVATE TNode<MaybeObject>
CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>(
- TNode<TransitionArray>, int, TNode<IntPtrT>, int, LoadSensitivity);
+ TNode<TransitionArray>, int, TNode<IntPtrT>, int);
template <typename TIndex>
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset,
- LoadSensitivity needs_poisoning, CheckBounds check_bounds) {
+ CheckBounds check_bounds) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT indexes are allowed");
- CSA_ASSERT(this, IsFixedArraySubclass(object));
- CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
+ CSA_DCHECK(this, IsFixedArraySubclass(object));
+ CSA_DCHECK(this, IsNotWeakFixedArraySubclass(object));
if (NeedsBoundsCheck(check_bounds)) {
FixedArrayBoundsCheck(object, index, additional_offset);
}
- TNode<MaybeObject> element =
- LoadArrayElement(object, FixedArray::kHeaderSize, index,
- additional_offset, needs_poisoning);
+ TNode<MaybeObject> element = LoadArrayElement(object, FixedArray::kHeaderSize,
+ index, additional_offset);
return CAST(element);
}
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>,
- int, LoadSensitivity,
- CheckBounds);
+ int, CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>,
TNode<UintPtrT>, int,
- LoadSensitivity,
CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>,
TNode<IntPtrT>, int,
- LoadSensitivity, CheckBounds);
+ CheckBounds);
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Smi> index,
@@ -2291,9 +2288,8 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
TNode<PropertyArray> object, TNode<IntPtrT> index) {
int additional_offset = 0;
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
- additional_offset, needs_poisoning));
+ additional_offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
@@ -2600,7 +2596,7 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size);
- CSA_SLOW_ASSERT(
+ CSA_SLOW_DCHECK(
this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
return Load<MaybeObject>(feedback_vector, offset);
@@ -2629,7 +2625,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
endian_correction;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object),
+ CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(object),
array_header_size + endian_correction));
if (SmiValuesAre32Bits()) {
return Load<Int32T>(object, offset);
@@ -2640,7 +2636,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset) {
- CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
+ CSA_SLOW_DCHECK(this, IsFixedArraySubclass(object));
return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize,
index, additional_offset);
}
@@ -2648,7 +2644,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, LoadSensitivity::kSafe);
+ additional_offset);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
@@ -2657,7 +2653,7 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(
+ CSA_DCHECK(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
@@ -2719,7 +2715,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
BIND(&if_dictionary);
{
- CSA_ASSERT(this, IsDictionaryElementsKind(elements_kind));
+ CSA_DCHECK(this, IsDictionaryElementsKind(elements_kind));
var_result = BasicLoadNumberDictionaryElement(CAST(elements), index,
if_accessor, if_hole);
Goto(&done);
@@ -2796,7 +2792,7 @@ TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) {
Goto(&context_search);
BIND(&context_search);
{
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
TaggedEqual(cur_context.value(), native_context)));
GotoIf(TaggedEqual(LoadMap(CAST(cur_context.value())), module_map),
&context_found);
@@ -2845,7 +2841,7 @@ TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap(
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
TNode<Int32T> kind, TNode<NativeContext> native_context) {
- CSA_ASSERT(this, IsFastElementsKind(kind));
+ CSA_DCHECK(this, IsFastElementsKind(kind));
TNode<IntPtrT> offset =
IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
ChangeInt32ToIntPtr(kind));
@@ -2906,8 +2902,8 @@ void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup(
TNode<HeapObject> CodeStubAssembler::LoadJSFunctionPrototype(
TNode<JSFunction> function, Label* if_bailout) {
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
- CSA_ASSERT(this, IsClearWord32<Map::Bits1::HasNonInstancePrototypeBit>(
+ CSA_DCHECK(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
+ CSA_DCHECK(this, IsClearWord32<Map::Bits1::HasNonInstancePrototypeBit>(
LoadMapBitField(LoadMap(function))));
TNode<HeapObject> proto_or_map = LoadObjectField<HeapObject>(
function, JSFunction::kPrototypeOrInitialMapOffset);
@@ -2934,11 +2930,18 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
- GotoIfNot(HasInstanceType(var_result.value(), BASELINE_DATA_TYPE),
+ GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
&check_for_interpreter_data);
- TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
- var_result.value(), BaselineData::kDataOffset);
- var_result = baseline_data;
+ {
+ TNode<Code> code = FromCodeT(CAST(var_result.value()));
+ CSA_DCHECK(
+ this, Word32Equal(DecodeWord32<Code::KindField>(LoadObjectField<Int32T>(
+ code, Code::kFlagsOffset)),
+ Int32Constant(static_cast<int>(CodeKind::BASELINE))));
+ TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
+ code, Code::kDeoptimizationDataOrInterpreterDataOffset);
+ var_result = baseline_data;
+ }
Goto(&check_for_interpreter_data);
BIND(&check_for_interpreter_data);
@@ -3003,7 +3006,7 @@ void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
- AssertHasValidMap(object);
+ DcheckHasValidMap(object);
}
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
@@ -3014,7 +3017,7 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
TNode<Map> map) {
OptimizedStoreMap(object, map);
- AssertHasValidMap(object);
+ DcheckHasValidMap(object);
}
void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
@@ -3055,7 +3058,7 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
static_cast<int>(PropertyArray::kLengthAndHashOffset));
// Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers
- CSA_ASSERT(
+ CSA_DCHECK(
this,
IsOffsetInBounds(
offset,
@@ -3136,7 +3139,7 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(
TNode<IntPtrT> offset =
ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size);
// Check that slot <= feedback_vector.length.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize),
SmiFromIntPtr(offset), feedback_vector);
@@ -3197,7 +3200,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
- TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
+ TNode<BInt> growth =
+ IntPtrToBInt(IntPtrSub(args->GetLengthWithoutReceiver(), first));
PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
growth, &pre_bailout);
@@ -3276,7 +3280,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Cell> CodeStubAssembler::AllocateCellWithValue(TNode<Object> value,
WriteBarrierMode mode) {
- TNode<HeapObject> result = Allocate(Cell::kSize, kNone);
+ TNode<HeapObject> result = Allocate(Cell::kSize, AllocationFlag::kNone);
StoreMapNoWriteBarrier(result, RootIndex::kCellMap);
TNode<Cell> cell = CAST(result);
StoreCellValue(cell, value, mode);
@@ -3299,7 +3303,7 @@ void CodeStubAssembler::StoreCellValue(TNode<Cell> cell, TNode<Object> value,
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
- TNode<HeapObject> result = Allocate(HeapNumber::kSize, kNone);
+ TNode<HeapObject> result = Allocate(HeapNumber::kSize, AllocationFlag::kNone);
RootIndex heap_map_index = RootIndex::kHeapNumberMap;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
@@ -3344,7 +3348,8 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
TNode<IntPtrT> size =
IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kSystemPointerSizeLog2)));
- TNode<HeapObject> raw_result = Allocate(size, kAllowLargeObjectAllocation);
+ TNode<HeapObject> raw_result =
+ Allocate(size, AllocationFlag::kAllowLargeObjectAllocation);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3404,7 +3409,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
TNode<ByteArray> CodeStubAssembler::AllocateNonEmptyByteArray(
TNode<UintPtrT> length, AllocationFlags flags) {
- CSA_ASSERT(this, WordNotEqual(length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(length, IntPtrConstant(0)));
Comment("AllocateNonEmptyByteArray");
TVARIABLE(Object, var_result);
@@ -3552,7 +3557,7 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<IntPtrT> at_least_space_for, AllocationFlags flags) {
- CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
@@ -3561,8 +3566,8 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TNode<IntPtrT> capacity, AllocationFlags flags) {
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> length = EntryToIndex<NameDictionary>(capacity);
TNode<IntPtrT> store_size = IntPtrAdd(
TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
@@ -3620,7 +3625,7 @@ TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
TNode<NameDictionary> dictionary, Label* large_object_fallback) {
Comment("Copy boilerplate property dict");
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<NameDictionary>(dictionary));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
GotoIf(UintPtrGreaterThan(
capacity, IntPtrConstant(NameDictionary::kMaxRegularCapacity)),
large_object_fallback);
@@ -3644,11 +3649,11 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTable(
template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
TNode<IntPtrT> capacity) {
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this,
IntPtrGreaterThanOrEqual(
capacity, IntPtrConstant(CollectionType::kInitialCapacity)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(
capacity, IntPtrConstant(CollectionType::MaxCapacity())));
@@ -3666,9 +3671,9 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
const ElementsKind elements_kind = HOLEY_ELEMENTS;
TNode<Map> fixed_array_map =
HeapConstant(CollectionType::GetMap(ReadOnlyRoots(isolate())));
- TNode<CollectionType> table =
- CAST(AllocateFixedArray(elements_kind, fixed_array_length,
- kAllowLargeObjectAllocation, fixed_array_map));
+ TNode<CollectionType> table = CAST(AllocateFixedArray(
+ elements_kind, fixed_array_length,
+ AllocationFlag::kAllowLargeObjectAllocation, fixed_array_map));
Comment("Initialize the OrderedHashTable fields.");
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
@@ -3743,8 +3748,8 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
TimesTaggedSize(IntPtrMul(
capacity, IntPtrConstant(CollectionType::kEntrySize))));
- CSA_ASSERT(this, IntPtrEqual(ptr_diff, TimesTaggedSize(array_data_fields)));
- CSA_ASSERT(this, IntPtrEqual(expected_end, data_end_address));
+ CSA_DCHECK(this, IntPtrEqual(ptr_diff, TimesTaggedSize(array_data_fields)));
+ CSA_DCHECK(this, IntPtrEqual(expected_end, data_end_address));
#endif
}
@@ -3780,8 +3785,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
- CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
- CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
+ CSA_DCHECK(this, Word32BinaryNot(IsJSFunctionMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
TNode<IntPtrT> instance_size =
TimesTaggedSize(LoadMapInstanceSizeInWords(map));
@@ -3800,11 +3805,11 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
if (!properties) {
- CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
+ CSA_DCHECK(this, Word32BinaryNot(IsDictionaryMap((map))));
StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
} else {
- CSA_ASSERT(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties),
+ CSA_DCHECK(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties),
IsNameDictionary(*properties)),
IsSwissNameDictionary(*properties)),
IsEmptyFixedArray(*properties)));
@@ -3830,7 +3835,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
TNode<HeapObject> object, TNode<Map> map, TNode<IntPtrT> instance_size,
int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
RootIndex::kUndefinedValue);
@@ -3855,7 +3860,7 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
{
Comment("Decrease construction counter");
// Slack tracking is only done on initial maps.
- CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
+ CSA_DCHECK(this, IsUndefined(LoadMapBackPointer(map)));
STATIC_ASSERT(Map::Bits3::ConstructionCounterBits::kLastUsedBit == 31);
TNode<Word32T> new_bit_field3 = Int32Sub(
bit_field3,
@@ -3900,8 +3905,8 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
TNode<IntPtrT> end_address,
TNode<Object> value) {
Comment("StoreFieldsNoWriteBarrier");
- CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
- CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
+ CSA_DCHECK(this, WordIsAligned(start_address, kTaggedSize));
+ CSA_DCHECK(this, WordIsAligned(end_address, kTaggedSize));
BuildFastLoop<IntPtrT>(
start_address, end_address,
[=](TNode<IntPtrT> current) {
@@ -3912,7 +3917,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
}
void CodeStubAssembler::MakeFixedArrayCOW(TNode<FixedArray> array) {
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(array)));
+ CSA_DCHECK(this, IsFixedArrayMap(LoadMap(array)));
Label done(this);
// The empty fixed array is not modifiable anyway. And we shouldn't change its
// Map.
@@ -3933,7 +3938,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
base::Optional<TNode<AllocationSite>> allocation_site,
int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
if (allocation_site) {
@@ -3969,8 +3974,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
int array_header_size) {
Comment("begin allocation of JSArray with elements");
- CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CHECK_EQ(allocation_flags & ~AllocationFlag::kAllowLargeObjectAllocation, 0);
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
TVARIABLE(JSArray, array);
TVARIABLE(FixedArrayBase, elements);
@@ -4019,7 +4024,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// folding trick. Instead, we first allocate the elements in large object
// space, and then allocate the JSArray (and possibly the allocation
// memento) in new space.
- if (allocation_flags & kAllowLargeObjectAllocation) {
+ if (allocation_flags & AllocationFlag::kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size), &next);
@@ -4061,7 +4066,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements.value(), elements_map_index);
- CSA_ASSERT(this, WordNotEqual(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(capacity, IntPtrConstant(0)));
TNode<Smi> capacity_smi = SmiTag(capacity);
StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
capacity_smi);
@@ -4076,7 +4081,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
base::Optional<TNode<AllocationSite>> allocation_site,
TNode<IntPtrT> size_in_bytes) {
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
TNode<HeapObject> array = AllocateInNewSpace(size_in_bytes);
@@ -4099,7 +4104,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
AllocationFlags allocation_flags) {
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
@@ -4206,7 +4211,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
BIND(&allocate_jsarray);
// Handle any nonextensible elements kinds
- CSA_ASSERT(this, IsElementsKindLessThanOrEqual(
+ CSA_DCHECK(this, IsElementsKindLessThanOrEqual(
var_elements_kind.value(),
LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND));
GotoIf(IsElementsKindLessThanOrEqual(var_elements_kind.value(),
@@ -4234,7 +4239,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT capacity is allowed");
Comment("AllocateFixedArray");
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant<TIndex>(0)));
const intptr_t kMaxLength = IsDoubleElementsKind(kind)
@@ -4259,7 +4264,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind);
- if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
+ if (IsDoubleElementsKind(kind)) flags |= AllocationFlag::kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
TNode<HeapObject> array = Allocate(total_size, flags);
if (fixed_array_map) {
@@ -4269,7 +4274,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
// need the write barrier even in LOS, but it's better to not take chances
// in case this invariant changes later, since it's difficult to enforce
// locally here.
- if (flags == CodeStubAssembler::kNone) {
+ if (flags == AllocationFlag::kNone) {
StoreMapNoWriteBarrier(array, *fixed_array_map);
} else {
StoreMap(array, *fixed_array_map);
@@ -4305,9 +4310,9 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
"Only Smi or IntPtrT first, count, and capacity are allowed");
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), capacity));
- CSA_ASSERT(this, TaggedEqual(source_map, LoadMap(source)));
+ CSA_DCHECK(this, TaggedEqual(source_map, LoadMap(source)));
TVARIABLE(FixedArrayBase, var_result);
TVARIABLE(Map, var_target_map, source_map);
@@ -4319,11 +4324,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// we can't just use COW, use FixedArrayMap as the target map. Otherwise, use
// source_map as the target map.
if (IsDoubleElementsKind(from_kind)) {
- CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
var_target_map = FixedArrayMapConstant();
Goto(&new_space_check);
} else {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
Branch(TaggedEqual(var_target_map.value(), FixedCOWArrayMapConstant()),
&is_cow, &new_space_check);
@@ -4350,17 +4355,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
{
bool handle_old_space = !FLAG_young_generation_large_objects;
if (handle_old_space) {
- if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
- handle_old_space = false;
- CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace(
- count, FixedArray::kHeaderSize)));
- } else {
- int constant_count;
- handle_old_space =
- !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
- (constant_count >
- FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
- }
+ int constant_count;
+ handle_old_space =
+ !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
+ (constant_count >
+ FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
}
Label old_space(this, Label::kDeferred);
@@ -4383,7 +4382,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
TNode<IntPtrT> object_page = PageFromAddress(object_word);
TNode<IntPtrT> page_flags =
Load<IntPtrT>(object_page, IntPtrConstant(Page::kFlagsOffset));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
WordNotEqual(
WordAnd(page_flags,
@@ -4469,7 +4468,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
"Only Smi or IntPtrT first, count, and capacity are allowed");
DCHECK_NE(var_holes_converted, nullptr);
- CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(fixed_array_map));
TVARIABLE(FixedArrayBase, var_result);
const ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
@@ -4482,7 +4481,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
// The construction of the loop and the offsets for double elements is
// extracted from CopyFixedArrayElements.
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
Comment("[ ExtractFixedDoubleArrayFillingHoles");
@@ -4563,10 +4562,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
: HoleConversionMode::kDontConvert;
TVARIABLE(FixedArrayBase, var_result);
- const AllocationFlags allocation_flags =
- (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
- ? CodeStubAssembler::kNone
- : CodeStubAssembler::kAllowLargeObjectAllocation;
+ auto allocation_flags = AllocationFlag::kAllowLargeObjectAllocation;
if (!first) {
first = IntPtrOrSmiConstant<TIndex>(0);
}
@@ -4574,13 +4570,13 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
count = IntPtrOrSmiSub(
TaggedToParameter<TIndex>(LoadFixedArrayBaseLength(source)), *first);
- CSA_ASSERT(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant<TIndex>(0),
+ CSA_DCHECK(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant<TIndex>(0),
*count));
}
if (!capacity) {
capacity = *count;
} else {
- CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
+ CSA_DCHECK(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
IntPtrOrSmiAdd(*first, *count), *capacity)));
}
@@ -4592,7 +4588,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
GotoIf(IsFixedDoubleArrayMap(source_map), &if_fixed_double_array);
} else {
- CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
}
}
@@ -4659,8 +4655,8 @@ CodeStubAssembler::ExtractFixedArray<IntPtrT>(
void CodeStubAssembler::InitializePropertyArrayLength(
TNode<PropertyArray> property_array, TNode<IntPtrT> length) {
- CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(
length, IntPtrConstant(PropertyArray::LengthField::kMax)));
StoreObjectFieldNoWriteBarrier(
@@ -4669,10 +4665,10 @@ void CodeStubAssembler::InitializePropertyArrayLength(
TNode<PropertyArray> CodeStubAssembler::AllocatePropertyArray(
TNode<IntPtrT> capacity) {
- CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> total_size = GetPropertyArrayAllocationSize(capacity);
- TNode<HeapObject> array = Allocate(total_size, kNone);
+ TNode<HeapObject> array = Allocate(total_size, AllocationFlag::kNone);
RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
@@ -4703,7 +4699,7 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT from and to are allowed");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKind(array, kind));
DCHECK(value_root_index == RootIndex::kTheHoleValue ||
value_root_index == RootIndex::kUndefinedValue);
@@ -4762,7 +4758,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
TNode<IntPtrT> index) {
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, PACKED_DOUBLE_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag);
- CSA_ASSERT(this, IsOffsetInBounds(
+ CSA_DCHECK(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(array),
FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
StoreDoubleHole(array, offset);
@@ -4770,10 +4766,10 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<IntPtrT> length) {
- CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+ CSA_DCHECK(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
TNode<IntPtrT> byte_length = TimesTaggedSize(length);
- CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+ CSA_DCHECK(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
FixedArray::kHeaderSize - kHeapObjectTag;
@@ -4792,10 +4788,10 @@ void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
void CodeStubAssembler::FillFixedDoubleArrayWithZero(
TNode<FixedDoubleArray> array, TNode<IntPtrT> length) {
- CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+ CSA_DCHECK(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
TNode<IntPtrT> byte_length = TimesDoubleSize(length);
- CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+ CSA_DCHECK(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
FixedDoubleArray::kHeaderSize - kHeapObjectTag;
@@ -4842,11 +4838,11 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, IsFixedArrayWithKind(elements, kind));
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
@@ -4931,15 +4927,15 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(src_elements, kind));
- CSA_ASSERT(this, IntPtrLessThanOrEqual(
+ CSA_DCHECK(this, IsFixedArrayWithKind(dst_elements, kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(src_elements, kind));
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(
IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(dst_elements)));
- CSA_ASSERT(this, IntPtrLessThanOrEqual(
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(
IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(src_elements)));
- CSA_ASSERT(this, Word32Or(TaggedNotEqual(dst_elements, src_elements),
+ CSA_DCHECK(this, Word32Or(TaggedNotEqual(dst_elements, src_elements),
IntPtrEqual(length, IntPtrConstant(0))));
// The write barrier can be ignored if {dst_elements} is in new space, or if
@@ -5007,8 +5003,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
DCHECK_IMPLIES(var_holes_converted != nullptr,
convert_holes == HoleConversionMode::kConvertToUndefined);
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
@@ -5195,7 +5191,7 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
TNode<IntPtrT> property_count,
WriteBarrierMode barrier_mode,
DestroySource destroy_source) {
- CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
+ CSA_SLOW_DCHECK(this, Word32Or(IsPropertyArray(from_array),
IsEmptyFixedArray(from_array)));
Comment("[ CopyPropertyArrayValues");
@@ -5253,7 +5249,7 @@ template <>
TNode<Object> CodeStubAssembler::LoadElementAndPrepareForStore(
TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole) {
- CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(array, from_kind));
DCHECK(!IsDoubleElementsKind(to_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
@@ -5272,7 +5268,7 @@ template <>
TNode<Float64T> CodeStubAssembler::LoadElementAndPrepareForStore(
TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole) {
- CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(array, from_kind));
DCHECK(IsDoubleElementsKind(to_kind));
if (IsDoubleElementsKind(from_kind)) {
return LoadDoubleWithHoleCheck(array, offset, if_hole,
@@ -5311,7 +5307,7 @@ template V8_EXPORT_PRIVATE TNode<Smi>
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
TNode<Smi> key, Label* bailout) {
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, kind));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
return TryGrowElementsCapacity(object, elements, kind,
@@ -5327,7 +5323,7 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT key and capacity nodes are allowed");
Comment("TryGrowElementsCapacity");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, kind));
// If the gap growth is too big, fall back to the runtime.
TNode<TIndex> max_gap = IntPtrOrSmiConstant<TIndex>(JSObject::kMaxGap);
@@ -5351,7 +5347,7 @@ TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT capacities are allowed");
Comment("[ GrowElementsCapacity");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
// If size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
@@ -5549,7 +5545,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback->value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback->value(),
SmiConstant(BinaryOperationFeedback::kNone)));
}
GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
@@ -5927,11 +5923,11 @@ TNode<WordT> CodeStubAssembler::TimesDoubleSize(TNode<WordT> value) {
}
TNode<Object> CodeStubAssembler::ToThisValue(TNode<Context> context,
- TNode<Object> value,
+ TNode<Object> input_value,
PrimitiveType primitive_type,
char const* method_name) {
// We might need to loop once due to JSPrimitiveWrapper unboxing.
- TVARIABLE(Object, var_value, value);
+ TVARIABLE(Object, var_value, input_value);
Label loop(this, &var_value), done_loop(this),
done_throw(this, Label::kDeferred);
Goto(&loop);
@@ -6321,7 +6317,7 @@ TNode<BoolT> CodeStubAssembler::IsStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringEncodingMask)),
Int32Constant(kOneByteStringTag));
@@ -6329,7 +6325,7 @@ TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kSeqStringTag));
@@ -6337,7 +6333,7 @@ TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type,
Int32Constant(kStringRepresentationMask | kStringEncodingMask)),
@@ -6346,7 +6342,7 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kConsStringTag));
@@ -6354,7 +6350,7 @@ TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kIsIndirectStringMask == 0x1);
STATIC_ASSERT(kIsIndirectStringTag == 0x1);
return UncheckedCast<BoolT>(
@@ -6363,7 +6359,7 @@ TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kExternalStringTag));
@@ -6371,7 +6367,7 @@ TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsUncachedExternalStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kUncachedExternalStringTag != 0);
return IsSetWord32(instance_type, kUncachedExternalStringMask);
}
@@ -6663,7 +6659,7 @@ TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
// Semantics: {object} is a Symbol, or a String that doesn't have a cached
// index. This returns {true} for strings containing representations of
// integers in the range above 9999999 (per kMaxCachedArrayIndexLength)
-// and below MAX_SAFE_INTEGER. For CSA_ASSERTs ensuring correct usage, this is
+// and below MAX_SAFE_INTEGER. For CSA_DCHECKs ensuring correct usage, this is
// better than no checking; and we don't have a good/fast way to accurately
// check such strings for being within "array index" (uint32_t) range.
TNode<BoolT> CodeStubAssembler::IsUniqueNameNoCachedIndex(
@@ -6923,7 +6919,7 @@ TNode<BoolT> CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(
TNode<Uint16T> CodeStubAssembler::StringCharCodeAt(TNode<String> string,
TNode<UintPtrT> index) {
- CSA_ASSERT(this, UintPtrLessThan(index, LoadStringLengthAsWord(string)));
+ CSA_DCHECK(this, UintPtrLessThan(index, LoadStringLengthAsWord(string)));
TVARIABLE(Uint16T, var_result);
@@ -7315,7 +7311,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
TNode<Context> context, TNode<HeapObject> input, Object::Conversion mode,
BigIntHandling bigint_handling) {
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(input)));
TVARIABLE(HeapObject, var_input, input);
TVARIABLE(Numeric, var_result);
@@ -7357,7 +7353,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
// Number/Numeric.
var_input = CAST(result);
// We have a new input. Redo the check and reload instance_type.
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
instance_type = LoadInstanceType(var_input.value());
Goto(&if_inputisnotreceiver);
}
@@ -7416,7 +7412,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&end);
if (mode == Object::Conversion::kToNumber) {
- CSA_ASSERT(this, IsNumber(var_result.value()));
+ CSA_DCHECK(this, IsNumber(var_result.value()));
}
return var_result.value();
}
@@ -7430,7 +7426,7 @@ TNode<Number> CodeStubAssembler::NonNumberToNumber(
void CodeStubAssembler::TryPlainPrimitiveNonNumberToNumber(
TNode<HeapObject> input, TVariable<Number>* var_result, Label* if_bailout) {
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(input)));
Label done(this);
// Dispatch on the {input} instance type.
@@ -7849,11 +7845,11 @@ TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
- CSA_ASSERT(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
+ CSA_DCHECK(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
TNode<Word32T> encoded_value = Word32Shl(value, Int32Constant(shift));
TNode<Word32T> masked_word;
if (starts_as_zero) {
- CSA_ASSERT(this, Word32Equal(Word32And(word, Int32Constant(~mask)), word));
+ CSA_DCHECK(this, Word32Equal(Word32And(word, Int32Constant(~mask)), word));
masked_word = word;
} else {
masked_word = Word32And(word, Int32Constant(~mask));
@@ -7867,12 +7863,12 @@ TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(value, UintPtrConstant(mask >> shift)));
TNode<WordT> encoded_value = WordShl(value, static_cast<int>(shift));
TNode<WordT> masked_word;
if (starts_as_zero) {
- CSA_ASSERT(this, WordEqual(WordAnd(word, UintPtrConstant(~mask)), word));
+ CSA_DCHECK(this, WordEqual(WordAnd(word, UintPtrConstant(~mask)), word));
masked_word = word;
} else {
masked_word = WordAnd(word, UintPtrConstant(~mask));
@@ -8006,7 +8002,7 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
{
TNode<IntPtrT> index = Signed(
DecodeWordFromWord32<String::ArrayIndexValueBits>(raw_hash_field));
- CSA_ASSERT(this, IntPtrLessThan(index, IntPtrConstant(INT_MAX)));
+ CSA_DCHECK(this, IntPtrLessThan(index, IntPtrConstant(INT_MAX)));
*var_index = index;
Goto(if_keyisindex);
}
@@ -8025,28 +8021,28 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
void CodeStubAssembler::StringWriteToFlatOneByte(TNode<String> source,
TNode<RawPtrT> sink,
- TNode<Int32T> from,
- TNode<Int32T> to) {
+ TNode<Int32T> start,
+ TNode<Int32T> length) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::string_write_to_flat_one_byte());
CallCFunction(function, base::nullopt,
std::make_pair(MachineType::AnyTagged(), source),
std::make_pair(MachineType::Pointer(), sink),
- std::make_pair(MachineType::Int32(), from),
- std::make_pair(MachineType::Int32(), to));
+ std::make_pair(MachineType::Int32(), start),
+ std::make_pair(MachineType::Int32(), length));
}
void CodeStubAssembler::StringWriteToFlatTwoByte(TNode<String> source,
TNode<RawPtrT> sink,
- TNode<Int32T> from,
- TNode<Int32T> to) {
+ TNode<Int32T> start,
+ TNode<Int32T> length) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::string_write_to_flat_two_byte());
CallCFunction(function, base::nullopt,
std::make_pair(MachineType::AnyTagged(), source),
std::make_pair(MachineType::Pointer(), sink),
- std::make_pair(MachineType::Int32(), from),
- std::make_pair(MachineType::Int32(), to));
+ std::make_pair(MachineType::Int32(), start),
+ std::make_pair(MachineType::Int32(), length));
}
TNode<RawPtr<Uint8T>> CodeStubAssembler::ExternalOneByteStringGetChars(
@@ -8359,7 +8355,7 @@ TNode<UintPtrT> CodeStubAssembler::UintPtrMin(TNode<UintPtrT> left,
template <>
TNode<HeapObject> CodeStubAssembler::LoadName<NameDictionary>(
TNode<HeapObject> key) {
- CSA_ASSERT(this, Word32Or(IsTheHole(key), IsName(key)));
+ CSA_DCHECK(this, Word32Or(IsTheHole(key), IsName(key)));
return key;
}
@@ -8380,7 +8376,7 @@ void CodeStubAssembler::NameDictionaryLookup(
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
DCHECK_IMPLIES(mode == kFindInsertionIndex, if_found == nullptr);
Comment("NameDictionaryLookup");
- CSA_ASSERT(this, IsUniqueName(unique_name));
+ CSA_DCHECK(this, IsUniqueName(unique_name));
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
TNode<IntPtrT> mask = IntPtrSub(capacity, IntPtrConstant(1));
@@ -8388,14 +8384,14 @@ void CodeStubAssembler::NameDictionaryLookup(
// See Dictionary::FirstProbe().
TNode<IntPtrT> count = IntPtrConstant(0);
- TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
+ TNode<IntPtrT> initial_entry = Signed(WordAnd(hash, mask));
TNode<Oddball> undefined = UndefinedConstant();
// Appease the variable merging algorithm for "Goto(&loop)" below.
*var_name_index = IntPtrConstant(0);
TVARIABLE(IntPtrT, var_count, count);
- TVARIABLE(IntPtrT, var_entry, entry);
+ TVARIABLE(IntPtrT, var_entry, initial_entry);
Label loop(this, {&var_count, &var_entry, var_name_index});
Goto(&loop);
BIND(&loop);
@@ -8468,7 +8464,7 @@ void CodeStubAssembler::NameDictionaryLookup(
void CodeStubAssembler::NumberDictionaryLookup(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* if_found, TVariable<IntPtrT>* var_entry, Label* if_not_found) {
- CSA_ASSERT(this, IsNumberDictionary(dictionary));
+ CSA_DCHECK(this, IsNumberDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
@@ -8480,14 +8476,14 @@ void CodeStubAssembler::NumberDictionaryLookup(
// See Dictionary::FirstProbe().
TNode<IntPtrT> count = IntPtrConstant(0);
- TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
+ TNode<IntPtrT> initial_entry = Signed(WordAnd(hash, mask));
TNode<Oddball> undefined = UndefinedConstant();
TNode<Oddball> the_hole = TheHoleConstant();
TVARIABLE(IntPtrT, var_count, count);
Label loop(this, {&var_count, var_entry});
- *var_entry = entry;
+ *var_entry = initial_entry;
Goto(&loop);
BIND(&loop);
{
@@ -8574,7 +8570,7 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(
TNode<NameDictionary> dictionary, TNode<Name> name, TNode<Object> value,
TNode<IntPtrT> index, TNode<Smi> enum_index) {
// This should only be used for adding, not updating existing mappings.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(TaggedEqual(LoadFixedArrayElement(dictionary, index),
UndefinedConstant()),
TaggedEqual(LoadFixedArrayElement(dictionary, index),
@@ -8618,7 +8614,7 @@ void CodeStubAssembler::InsertEntry<GlobalDictionary>(
template <class Dictionary>
void CodeStubAssembler::Add(TNode<Dictionary> dictionary, TNode<Name> key,
TNode<Object> value, Label* bailout) {
- CSA_ASSERT(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary)));
+ CSA_DCHECK(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary)));
TNode<Smi> capacity = GetCapacity<Dictionary>(dictionary);
TNode<Smi> nof = GetNumberOfElements<Dictionary>(dictionary);
TNode<Smi> new_nof = SmiAdd(nof, SmiConstant(1));
@@ -8629,7 +8625,7 @@ void CodeStubAssembler::Add(TNode<Dictionary> dictionary, TNode<Name> key,
GotoIf(SmiBelow(capacity, required_capacity_pseudo_smi), bailout);
// Require rehashing if more than 50% of free elements are deleted elements.
TNode<Smi> deleted = GetNumberOfDeletedElements<Dictionary>(dictionary);
- CSA_ASSERT(this, SmiAbove(capacity, new_nof));
+ CSA_DCHECK(this, SmiAbove(capacity, new_nof));
TNode<Smi> half_of_free_elements = SmiShr(SmiSub(capacity, new_nof), 1);
GotoIf(SmiAbove(deleted, half_of_free_elements), bailout);
@@ -8715,7 +8711,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
std::is_base_of<DescriptorArray, Array>::value,
"T must be a descendant of FixedArray or a WeakFixedArray");
Comment("LookupLinear");
- CSA_ASSERT(this, IsUniqueName(unique_name));
+ CSA_DCHECK(this, IsUniqueName(unique_name));
TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
TNode<IntPtrT> factor = IntPtrConstant(Array::kEntrySize);
TNode<IntPtrT> last_exclusive = IntPtrAdd(
@@ -8827,10 +8823,10 @@ void CodeStubAssembler::LookupBinary(TNode<Name> unique_name,
Unsigned(Int32Sub(NumberOfEntries<Array>(array), Int32Constant(1)));
TVARIABLE(Uint32T, var_high, limit);
TNode<Uint32T> hash = LoadNameHashAssumeComputed(unique_name);
- CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0)));
+ CSA_DCHECK(this, Word32NotEqual(hash, Int32Constant(0)));
// Assume non-empty array.
- CSA_ASSERT(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
+ CSA_DCHECK(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
Label binary_loop(this, {&var_high, &var_low});
Goto(&binary_loop);
@@ -8952,7 +8948,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
}
BIND(&if_string);
{
- CSA_ASSERT(this, IsString(next_key));
+ CSA_DCHECK(this, IsString(next_key));
// Process string property when |var_is_symbol_processing_loop| is
// false.
Branch(var_is_symbol_processing_loop.value(), &next_iteration,
@@ -9111,7 +9107,7 @@ TNode<NativeContext> CodeStubAssembler::GetCreationContext(
// Remote objects don't have a creation context.
GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout);
- CSA_ASSERT(this, IsJSFunctionMap(receiver_map));
+ CSA_DCHECK(this, IsJSFunctionMap(receiver_map));
var_function = CAST(receiver);
Goto(&done);
@@ -9194,8 +9190,8 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
Label* if_found_fast, Label* if_found_dict,
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
Label* if_not_found, Label* bailout) {
- CSA_ASSERT(this, IsSimpleObjectMap(map));
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsSimpleObjectMap(map));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
@@ -9259,7 +9255,7 @@ void CodeStubAssembler::TryHasOwnProperty(TNode<HeapObject> object,
Label* if_found, Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_name_index);
@@ -9357,8 +9353,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
- Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
- &if_in_descriptor);
+ Branch(Word32Equal(location, Int32Constant(static_cast<int32_t>(
+ PropertyLocation::kField))),
+ &if_in_field, &if_in_descriptor);
BIND(&if_in_field);
{
TNode<IntPtrT> field_index =
@@ -9367,7 +9364,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
DecodeWord32<PropertyDetails::RepresentationField>(details);
// TODO(ishell): support WasmValues.
- CSA_ASSERT(this, Word32NotEqual(representation,
+ CSA_DCHECK(this, Word32NotEqual(representation,
Int32Constant(Representation::kWasmValue)));
field_index =
IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(map));
@@ -9535,7 +9532,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
GetCreationContext(CAST(holder), if_bailout);
var_value = CallBuiltin(
Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
- creation_context, getter, IntPtrConstant(0), receiver);
+ creation_context, getter, IntPtrConstant(i::JSParameterCount(0)),
+ receiver);
Goto(&done);
BIND(&runtime);
@@ -9628,7 +9626,7 @@ void CodeStubAssembler::TryGetOwnProperty(
Label* if_not_found, Label* if_bailout, GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
@@ -9953,12 +9951,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(IsNull(proto), if_end);
- TNode<Map> map = LoadMap(proto);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ TNode<Map> proto_map = LoadMap(proto);
+ TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
var_holder = proto;
- var_holder_map = map;
- var_holder_instance_type = instance_type;
+ var_holder_map = proto_map;
+ var_holder_instance_type = proto_instance_type;
Goto(&loop);
}
}
@@ -9983,12 +9981,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(IsNull(proto), if_end);
- TNode<Map> map = LoadMap(proto);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ TNode<Map> proto_map = LoadMap(proto);
+ TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
var_holder = proto;
- var_holder_map = map;
- var_holder_instance_type = instance_type;
+ var_holder_map = proto_map;
+ var_holder_instance_type = proto_instance_type;
Goto(&loop);
}
}
@@ -10033,7 +10031,7 @@ TNode<Oddball> CodeStubAssembler::HasInPrototypeChain(TNode<Context> context,
GotoIf(TaggedEqual(object_prototype, prototype), &return_true);
// Continue with the prototype.
- CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
+ CSA_DCHECK(this, TaggedIsNotSmi(object_prototype));
var_object_map = LoadMap(object_prototype);
Goto(&loop);
}
@@ -10283,7 +10281,7 @@ void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
MaybeUpdateFeedback(feedback, maybe_feedback_vector, slot_id);
break;
case UpdateFeedbackMode::kGuaranteedFeedback:
- CSA_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
+ CSA_DCHECK(this, IsFeedbackVector(maybe_feedback_vector));
UpdateFeedback(feedback, CAST(maybe_feedback_vector), slot_id);
break;
}
@@ -10527,7 +10525,7 @@ void CodeStubAssembler::StoreElementTypedArrayWord32(TNode<RawPtrT> elements,
"Only UintPtrT or IntPtrT indices is allowed");
DCHECK(IsTypedArrayElementsKind(kind));
if (kind == UINT8_CLAMPED_ELEMENTS) {
- CSA_ASSERT(this, Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
+ CSA_DCHECK(this, Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
}
TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
// TODO(cbruni): Add OOB check once typed.
@@ -11026,13 +11024,13 @@ void CodeStubAssembler::EmitElementStore(
TNode<JSObject> object, TNode<Object> key, TNode<Object> value,
ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout,
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
- CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSProxy(object)));
TNode<FixedArrayBase> elements = LoadElements(object);
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
IsNonextensibleElementsKind(elements_kind))) {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (!IsCOWHandlingStoreMode(store_mode)) {
GotoIf(IsFixedCOWArrayMap(LoadMap(elements)), bailout);
}
@@ -11133,13 +11131,13 @@ void CodeStubAssembler::EmitElementStore(
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
IsNonextensibleElementsKind(elements_kind))) {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
elements = CopyElementsOnWrite(object, elements, elements_kind,
Signed(length), bailout);
}
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
if (float_value) {
StoreElement(elements, elements_kind, intptr_key, float_value.value());
} else {
@@ -11188,7 +11186,7 @@ TNode<FixedArrayBase> CodeStubAssembler::CheckForCapacityGrow(
Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
GotoIf(TaggedIsSmi(maybe_elements), bailout);
TNode<FixedArrayBase> new_elements = CAST(maybe_elements);
- CSA_ASSERT(this, IsFixedArrayWithKind(new_elements, kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(new_elements, kind));
checked_elements = new_elements;
Goto(&fits_capacity);
}
@@ -11255,12 +11253,12 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
TNode<IntPtrT> array_length = Select<IntPtrT>(
IsJSArray(object),
[=]() {
- CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(object)));
+ CSA_DCHECK(this, IsFastElementsKind(LoadElementsKind(object)));
return SmiUntag(LoadFastJSArrayLength(CAST(object)));
},
[=]() { return elements_length; });
- CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(elements_length, IntPtrConstant(0)));
GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
elements_length, bailout);
@@ -11352,7 +11350,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
- TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
+ TNode<HeapObject> site = Allocate(size, AllocationFlag::kPretenured);
StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
// Should match AllocationSite::Initialize.
TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
@@ -11429,7 +11427,7 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
TNode<Int32T> elements_kind =
Signed(DecodeWord32<AllocationSite::ElementsKindBits>(
SmiToInt32(transition_info)));
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ CSA_DCHECK(this, IsFastElementsKind(elements_kind));
return elements_kind;
}
@@ -11499,7 +11497,7 @@ void CodeStubAssembler::BuildFastArrayForEach(
TNode<TIndex> last_element_exclusive, const FastArrayForEachBody& body,
ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
- CSA_SLOW_ASSERT(this, Word32Or(IsFixedArrayWithKind(array, kind),
+ CSA_SLOW_DCHECK(this, Word32Or(IsFixedArrayWithKind(array, kind),
IsPropertyArray(array)));
intptr_t first_val;
@@ -11555,7 +11553,7 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset,
RootIndex root_index) {
- CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_SLOW_DCHECK(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<AnyTaggedT> root_value;
@@ -11706,7 +11704,7 @@ TNode<Context> CodeStubAssembler::GotoIfHasContextExtensionUpToDepth(
Label no_extension(this);
// Loop until the depth is 0.
- CSA_ASSERT(this, Word32NotEqual(cur_depth.value(), Int32Constant(0)));
+ CSA_DCHECK(this, Word32NotEqual(cur_depth.value(), Int32Constant(0)));
Goto(&context_search);
BIND(&context_search);
{
@@ -12197,7 +12195,7 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_string);
{
- CSA_ASSERT(this, IsString(value_heapobject));
+ CSA_DCHECK(this, IsString(value_heapobject));
CombineFeedback(var_type_feedback,
CollectFeedbackForString(instance_type));
Goto(if_equal);
@@ -12205,28 +12203,28 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_symbol);
{
- CSA_ASSERT(this, IsSymbol(value_heapobject));
+ CSA_DCHECK(this, IsSymbol(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
Goto(if_equal);
}
BIND(&if_receiver);
{
- CSA_ASSERT(this, IsJSReceiver(value_heapobject));
+ CSA_DCHECK(this, IsJSReceiver(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(if_equal);
}
BIND(&if_bigint);
{
- CSA_ASSERT(this, IsBigInt(value_heapobject));
+ CSA_DCHECK(this, IsBigInt(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
Goto(if_equal);
}
BIND(&if_oddball);
{
- CSA_ASSERT(this, IsOddball(value_heapobject));
+ CSA_DCHECK(this, IsOddball(value_heapobject));
Label if_boolean(this), if_not_boolean(this);
Branch(IsBooleanMap(value_map), &if_boolean, &if_not_boolean);
@@ -12238,7 +12236,7 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_not_boolean);
{
- CSA_ASSERT(this, IsNullOrUndefined(value_heapobject));
+ CSA_DCHECK(this, IsNullOrUndefined(value_heapobject));
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kReceiverOrNullOrUndefined);
Goto(if_equal);
@@ -12327,8 +12325,8 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_right_not_smi);
{
TNode<Map> right_map = LoadMap(CAST(right));
- Label if_right_heapnumber(this), if_right_boolean(this),
- if_right_oddball(this), if_right_bigint(this, Label::kDeferred),
+ Label if_right_heapnumber(this), if_right_oddball(this),
+ if_right_bigint(this, Label::kDeferred),
if_right_receiver(this, Label::kDeferred);
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
@@ -12616,7 +12614,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_left_receiver);
{
- CSA_ASSERT(this, IsJSReceiverInstanceType(left_type));
+ CSA_DCHECK(this, IsJSReceiverInstanceType(left_type));
Label if_right_receiver(this), if_right_not_receiver(this);
Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
&if_right_not_receiver);
@@ -12641,7 +12639,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_right_undetectable);
{
// When we get here, {right} must be either Null or Undefined.
- CSA_ASSERT(this, IsNullOrUndefined(right));
+ CSA_DCHECK(this, IsNullOrUndefined(right));
if (var_type_feedback != nullptr) {
*var_type_feedback = SmiConstant(
CompareOperationFeedback::kReceiverOrNullOrUndefined);
@@ -13282,7 +13280,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
}
BIND(&end);
- CSA_ASSERT(this, IsBoolean(result.value()));
+ CSA_DCHECK(this, IsBoolean(result.value()));
return result.value();
}
@@ -13303,7 +13301,7 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
// Load the enumeration length and cache from the {enumerator}.
TNode<Map> map_enumerator = CAST(enumerator);
TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
- CSA_ASSERT(this, WordNotEqual(enum_length,
+ CSA_DCHECK(this, WordNotEqual(enum_length,
IntPtrConstant(kInvalidEnumCacheSentinel)));
TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
@@ -13382,7 +13380,7 @@ TNode<String> CodeStubAssembler::Typeof(TNode<Object> value) {
GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
- CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
+ CSA_DCHECK(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
result_var = HeapConstant(isolate()->factory()->symbol_string());
Goto(&return_result);
@@ -13730,7 +13728,7 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
TNode<Context> context, TNode<Object> value, TNode<Oddball> done) {
- CSA_ASSERT(this, IsBoolean(done));
+ CSA_DCHECK(this, IsBoolean(done));
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
@@ -13806,9 +13804,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadExternalPointerFromObject(array_buffer,
- JSArrayBuffer::kBackingStoreOffset,
- kArrayBufferBackingStoreTag);
+ return LoadObjectField<RawPtrT>(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -13866,7 +13863,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
BIND(&is_gsab);
{
// Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
- CSA_ASSERT(this, IsLengthTrackingTypedArray(array));
+ CSA_DCHECK(this, IsLengthTrackingTypedArray(array));
// Read the byte length from the BackingStore.
const TNode<ExternalReference> length_function = ExternalConstant(
ExternalReference::length_tracking_gsab_backed_typed_array_length());
@@ -13926,7 +13923,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
return result.value();
}
-void CodeStubAssembler::IsTypedArrayDetachedOrOutOfBounds(
+void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
TNode<JSTypedArray> array, Label* detached_or_oob,
Label* not_detached_nor_oob) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
@@ -14093,7 +14090,8 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
}
TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
- CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
+ CSA_DCHECK(assembler_, assembler_->UintPtrOrSmiLessThan(
+ index, GetLengthWithoutReceiver()));
return assembler_->LoadFullTagged(AtIndexPtr(index));
}
@@ -14101,9 +14099,19 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
return AtIndex(assembler_->IntPtrConstant(index));
}
+TNode<IntPtrT> CodeStubArguments::GetLengthWithoutReceiver() const {
+ TNode<IntPtrT> argc = argc_;
+ if (kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrSub(argc, assembler_->IntPtrConstant(1));
+ }
+ return argc;
+}
+
TNode<IntPtrT> CodeStubArguments::GetLengthWithReceiver() const {
- TNode<IntPtrT> argc = GetLength();
- argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> argc = argc_;
+ if (!kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ }
return argc;
}
@@ -14113,8 +14121,9 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_),
- &argument_missing);
+ assembler_->GotoIf(
+ assembler_->UintPtrGreaterThanOrEqual(index, GetLengthWithoutReceiver()),
+ &argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -14135,7 +14144,7 @@ void CodeStubArguments::ForEach(
first = assembler_->IntPtrConstant(0);
}
if (last == nullptr) {
- last = argc_;
+ last = GetLengthWithoutReceiver();
}
TNode<RawPtrT> start = AtIndexPtr(first);
TNode<RawPtrT> end = AtIndexPtr(last);
@@ -14150,8 +14159,7 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count =
- assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> pop_count = GetLengthWithReceiver();
assembler_->PopAndReturn(pop_count, value);
}
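
For readers following the argc changes in the hunks above, here is a minimal standalone sketch of the receiver-slot arithmetic that the new CodeStubArguments helpers encode, using plain ints instead of TNode values. The kJSArgcIncludesReceiver and kJSArgcReceiverSlots names mirror the constants referenced in the diff; the helper names and the main() harness below are illustrative only, not V8 code.

#include <cassert>

// Illustrative stand-in for the build-time constant: when true, the raw
// argument count stored in the frame already counts the receiver slot.
constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = 1;

// Number of user-visible arguments, excluding the receiver.
int LengthWithoutReceiver(int argc) {
  return kJSArgcIncludesReceiver ? argc - kJSArgcReceiverSlots : argc;
}

// Number of stack slots to pop on return, always including the receiver.
int LengthWithReceiver(int argc) {
  return kJSArgcIncludesReceiver ? argc : argc + kJSArgcReceiverSlots;
}

int main() {
  int argc = 3;  // raw count as stored in the frame (2 args + receiver here)
  assert(LengthWithoutReceiver(argc) == 2);
  assert(LengthWithReceiver(argc) == 3);
  return 0;
}

Either convention yields the same pop count for PopAndReturn; only the raw count stored in the frame differs.
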
@@ -14200,7 +14208,7 @@ TNode<BoolT> CodeStubAssembler::IsFastSmiElementsKind(
TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
TNode<Int32T> elements_kind) {
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ CSA_DCHECK(this, IsFastElementsKind(elements_kind));
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
STATIC_ASSERT(HOLEY_ELEMENTS == (PACKED_ELEMENTS | 1));
@@ -14210,7 +14218,7 @@ TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKindForRead(
TNode<Int32T> elements_kind) {
- CSA_ASSERT(this, Uint32LessThanOrEqual(
+ CSA_DCHECK(this, Uint32LessThanOrEqual(
elements_kind,
Int32Constant(LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
@@ -14229,6 +14237,11 @@ TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThan(
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
}
+TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThanOrEqual(
+ TNode<Int32T> target_kind, ElementsKind reference_kind) {
+ return Int32GreaterThanOrEqual(target_kind, Int32Constant(reference_kind));
+}
+
TNode<BoolT> CodeStubAssembler::IsElementsKindLessThanOrEqual(
TNode<Int32T> target_kind, ElementsKind reference_kind) {
return Int32LessThanOrEqual(target_kind, Int32Constant(reference_kind));
@@ -14294,7 +14307,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
- CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
+ CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
@@ -14336,7 +14349,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {
BYTECODE_ARRAY_TYPE,
- BASELINE_DATA_TYPE,
+ CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
@@ -14380,7 +14393,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
- TNode<CodeT> baseline_code = LoadBaselineDataBaselineCode(CAST(sfi_data));
+ TNode<CodeT> baseline_code = CAST(sfi_data);
sfi_code = FromCodeT(baseline_code);
Goto(&done);
}
@@ -14401,7 +14414,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsInterpreterData: Interpret bytecode
BIND(&check_is_interpreter_data);
// This is the default branch, so assert that we have the expected data type.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
{
TNode<CodeT> trampoline =
@@ -14436,8 +14449,8 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
// map parameter.
- CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
- CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsConstructorMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
const TNode<HeapObject> fun = Allocate(JSFunction::kSizeWithoutPrototype);
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreMapNoWriteBarrier(fun, map);
@@ -14518,7 +14531,7 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
TNode<HeapObject> properties = LoadSlowProperties(receiver);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(properties),
+ CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(properties),
IsGlobalDictionary(properties)));
length = Select<Smi>(
@@ -14533,7 +14546,7 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
});
} else {
- CSA_ASSERT(this, Word32Or(IsNameDictionary(properties),
+ CSA_DCHECK(this, Word32Or(IsNameDictionary(properties),
IsGlobalDictionary(properties)));
STATIC_ASSERT(static_cast<int>(NameDictionary::kNumberOfElementsIndex) ==
static_cast<int>(GlobalDictionary::kNumberOfElementsIndex));
@@ -14563,7 +14576,15 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
}
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
- TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type) {
+ if (kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountExcludesReceiver) {
+ argc = IntPtrAdd(argc, IntPtrConstant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountIncludesReceiver) {
+ argc = IntPtrSub(argc, IntPtrConstant(1));
+ }
return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
@@ -14652,7 +14673,7 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
Label done(this), next(this), runtime(this, Label::kDeferred);
TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
- CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ CSA_DCHECK_BRANCH(this, [=](Label* ok, Label* not_ok) {
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
SmiConstant(0), ok, not_ok);
});
@@ -14711,7 +14732,7 @@ void CodeStubAssembler::SetPropertyLength(TNode<Context> context,
TNode<Smi> length_smi = CAST(length);
TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
- CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(old_length));
// 2) If the created array's length matches the required length, then
// there's nothing else to do. Otherwise use the runtime to set the
@@ -14809,13 +14830,13 @@ void PrototypeCheckAssembler::CheckAndBranch(TNode<HeapObject> prototype,
for (int i = 0; i < properties_.length(); i++) {
// Assert the descriptor index is in-bounds.
int descriptor = properties_[i].descriptor_index;
- CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
+ CSA_DCHECK(this, Int32LessThan(Int32Constant(descriptor),
LoadNumberOfDescriptors(descriptors)));
// Assert that the name is correct. This essentially checks that
// the descriptor index corresponds to the insertion order in
// the bootstrapper.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
TaggedEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
CodeAssembler::LoadRoot(properties_[i].name_root_index)));
@@ -14929,7 +14950,7 @@ class MetaTableAccessor {
int bits = mt.MemSize() * 8;
TNode<UintPtrT> max_value = csa.UintPtrConstant((1ULL << bits) - 1);
- CSA_ASSERT(&csa, csa.UintPtrLessThanOrEqual(csa.ChangeUint32ToWord(data),
+ CSA_DCHECK(&csa, csa.UintPtrLessThanOrEqual(csa.ChangeUint32ToWord(data),
max_value));
#endif
@@ -14967,7 +14988,7 @@ class MetaTableAccessor {
csa.SmiToIntPtr(csa.LoadFixedArrayBaseLength(meta_table));
TNode<IntPtrT> max_allowed_offset = csa.IntPtrAdd(
byte_array_data_bytes, csa.IntPtrConstant(offset_to_data_minus_tag));
- CSA_ASSERT(&csa, csa.UintPtrLessThan(overall_offset, max_allowed_offset));
+ CSA_DCHECK(&csa, csa.UintPtrLessThan(overall_offset, max_allowed_offset));
#endif
return overall_offset;
@@ -15137,11 +15158,11 @@ TNode<SwissNameDictionary>
CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
TNode<IntPtrT> capacity) {
Comment("[ AllocateSwissNameDictionaryWithCapacity");
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(
capacity,
IntPtrConstant(SwissNameDictionary::kInitialCapacity)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(
capacity, IntPtrConstant(SwissNameDictionary::MaxCapacity())));
@@ -15181,7 +15202,7 @@ CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
@@ -15281,7 +15302,7 @@ TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
@@ -15431,7 +15452,7 @@ TNode<IntPtrT>
CodeStubAssembler::SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
TNode<SwissNameDictionary> dict, TNode<IntPtrT> capacity,
TNode<IntPtrT> index) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(capacity, ChangeUint32ToWord(
LoadSwissNameDictionaryCapacity(dict))));
@@ -15448,7 +15469,7 @@ CodeStubAssembler::SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
TNode<IntPtrT> property_details_table_start =
IntPtrAdd(data_table_start, data_and_ctrl_table_size);
- CSA_ASSERT(
+ CSA_DCHECK(
this,
WordEqual(FieldSliceSwissNameDictionaryPropertyDetailsTable(dict).offset,
// Our calculation subtracted the tag, Torque's offset didn't.
@@ -15551,15 +15572,15 @@ TNode<Uint64T> CodeStubAssembler::LoadSwissNameDictionaryCtrlTableGroup(
void CodeStubAssembler::SwissNameDictionarySetCtrl(
TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
TNode<IntPtrT> entry, TNode<Uint8T> ctrl) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(capacity, ChangeUint32ToWord(
LoadSwissNameDictionaryCapacity(table))));
- CSA_ASSERT(this, UintPtrLessThan(entry, capacity));
+ CSA_DCHECK(this, UintPtrLessThan(entry, capacity));
TNode<IntPtrT> one = IntPtrConstant(1);
TNode<IntPtrT> offset = SwissNameDictionaryCtrlTableStartOffsetMT(capacity);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(FieldSliceSwissNameDictionaryCtrlTable(table).offset,
IntPtrAdd(offset, one)));
@@ -15581,11 +15602,11 @@ void CodeStubAssembler::SwissNameDictionarySetCtrl(
TNode<IntPtrT> offset_copy_entry = IntPtrAdd(offset, copy_entry);
// |entry| < |kGroupWidth| implies |copy_entry| == |capacity| + |entry|
- CSA_ASSERT(this, Word32Or(UintPtrGreaterThanOrEqual(entry, group_width),
+ CSA_DCHECK(this, Word32Or(UintPtrGreaterThanOrEqual(entry, group_width),
WordEqual(copy_entry, IntPtrAdd(capacity, entry))));
// |entry| >= |kGroupWidth| implies |copy_entry| == |entry|
- CSA_ASSERT(this, Word32Or(UintPtrLessThan(entry, group_width),
+ CSA_DCHECK(this, Word32Or(UintPtrLessThan(entry, group_width),
WordEqual(copy_entry, entry)));
// TODO(v8:11330): consider using StoreObjectFieldNoWriteBarrier here.
diff --git a/chromium/v8/src/codegen/code-stub-assembler.h b/chromium/v8/src/codegen/code-stub-assembler.h
index 008af6006f5..1cb0b4cf6e1 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.h
+++ b/chromium/v8/src/codegen/code-stub-assembler.h
@@ -233,48 +233,49 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#endif
#ifdef DEBUG
-// CSA_ASSERT_ARGS generates an
+// CSA_DCHECK_ARGS generates an
// std::initializer_list<CodeStubAssembler::ExtraNode> from __VA_ARGS__. It
// currently supports between 0 and 2 arguments.
// clang-format off
-#define CSA_ASSERT_0_ARGS(...) {}
-#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}}
-#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
+#define CSA_DCHECK_0_ARGS(...) {}
+#define CSA_DCHECK_1_ARG(a, ...) {{a, #a}}
+#define CSA_DCHECK_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
// clang-format on
-#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
-#define CSA_ASSERT_ARGS(...) \
- CALL(SWITCH_CSA_ASSERT_ARGS, (, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
- CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS))
+#define SWITCH_CSA_DCHECK_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
+#define CSA_DCHECK_ARGS(...) \
+ CALL(SWITCH_CSA_DCHECK_ARGS, (, ##__VA_ARGS__, CSA_DCHECK_2_ARGS, \
+ CSA_DCHECK_1_ARG, CSA_DCHECK_0_ARGS))
// Workaround for MSVC to skip comma in empty __VA_ARGS__.
#define CALL(x, y) x y
-// CSA_ASSERT(csa, <condition>, <extra values to print...>)
+// CSA_DCHECK(csa, <condition>, <extra values to print...>)
-#define CSA_ASSERT(csa, condition_node, ...) \
- (csa)->Assert(condition_node, #condition_node, __FILE__, __LINE__, \
- CSA_ASSERT_ARGS(__VA_ARGS__))
+#define CSA_DCHECK(csa, condition_node, ...) \
+ (csa)->Dcheck(condition_node, #condition_node, __FILE__, __LINE__, \
+ CSA_DCHECK_ARGS(__VA_ARGS__))
-// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
+// CSA_DCHECK_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
// <extra values to print...>)
-#define CSA_ASSERT_BRANCH(csa, gen, ...) \
- (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
-
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> TNode<BoolT> { \
- const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
- Descriptor::kJSActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
- Descriptor::kJSActualArgumentsCount)), \
+#define CSA_DCHECK_BRANCH(csa, gen, ...) \
+ (csa)->Dcheck(gen, #gen, __FILE__, __LINE__, CSA_DCHECK_ARGS(__VA_ARGS__))
+
+#define CSA_DCHECK_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Dcheck( \
+ [&]() -> TNode<BoolT> { \
+ const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
+ Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, \
+ (csa)->Int32Constant(i::JSParameterCount(expected))); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
+ Descriptor::kJSActualArgumentsCount)), \
"argc"}})
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
- CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
+#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) \
+ CSA_DCHECK_JS_ARGC_OP(csa, Word32Equal, ==, expected)
#define CSA_DEBUG_INFO(name) \
{ #name, __FILE__, __LINE__ }
@@ -284,9 +285,9 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
-#define CSA_ASSERT(csa, ...) ((void)0)
-#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
+#define CSA_DCHECK(csa, ...) ((void)0)
+#define CSA_DCHECK_BRANCH(csa, ...) ((void)0)
+#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
@@ -297,12 +298,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))
#ifdef ENABLE_SLOW_DCHECKS
-#define CSA_SLOW_ASSERT(csa, ...) \
+#define CSA_SLOW_DCHECK(csa, ...) \
if (FLAG_enable_slow_asserts) { \
- CSA_ASSERT(csa, __VA_ARGS__); \
+ CSA_DCHECK(csa, __VA_ARGS__); \
}
#else
-#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
+#define CSA_SLOW_DCHECK(csa, ...) ((void)0)
#endif
// Provides JavaScript-specific "macro-assembler" functionality on top of the
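
The rename from CSA_ASSERT to CSA_DCHECK above follows the convention that DCHECK-style checks exist only in debug builds and compile away otherwise. A minimal plain-C++ analogue of that debug-only check pattern, assuming a DEBUG define as in the header above; this is a sketch of the general technique, not the V8 macros themselves:

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define MY_DCHECK(condition)                                        \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Check failed: %s at %s:%d\n",           \
                   #condition, __FILE__, __LINE__);                 \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
// Release builds: the whole check disappears, just like CSA_DCHECK above.
#define MY_DCHECK(condition) ((void)0)
#endif

int main() {
  int x = 1;
  MY_DCHECK(x == 1);  // evaluated in debug builds, a no-op otherwise
  return x == 1 ? 0 : 1;
}
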
@@ -321,7 +322,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
- enum AllocationFlag : uint8_t {
+ enum class AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1,
@@ -752,13 +753,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Operation bitwise_op);
// Allocate an object of the given size.
- TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
- TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ TNode<IntPtrT> size, AllocationFlags flags = AllocationFlag::kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ int size, AllocationFlags flags = AllocationFlag::kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
+ AllocationFlags flags = AllocationFlag::kNone);
- TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(int size,
+ AllocationFlags flags = AllocationFlag::kNone);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
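
Because AllocationFlag becomes an enum class in the hunk above, its enumerators no longer convert implicitly to integers, so combining them as bit flags requires explicit operators or a flags wrapper. V8 has its own machinery for this; the sketch below only shows the general scoped-enum-as-flags pattern with illustrative names:

#include <cstdint>

enum class AllocFlag : uint8_t {
  kNone = 0,
  kDoubleAlignment = 1,
  kPretenured = 1 << 1,
};

// Explicit bitwise operators make the scoped enum usable as a flag set.
constexpr AllocFlag operator|(AllocFlag a, AllocFlag b) {
  return static_cast<AllocFlag>(static_cast<uint8_t>(a) |
                                static_cast<uint8_t>(b));
}
constexpr bool HasFlag(AllocFlag set, AllocFlag f) {
  return (static_cast<uint8_t>(set) & static_cast<uint8_t>(f)) != 0;
}

int main() {
  AllocFlag flags = AllocFlag::kPretenured | AllocFlag::kDoubleAlignment;
  return HasFlag(flags, AllocFlag::kPretenured) ? 0 : 1;
}
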
@@ -767,13 +770,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
using NodeGenerator = std::function<TNode<T>()>;
using ExtraNode = std::pair<TNode<Object>, const char*>;
- void Assert(const BranchGenerator& branch, const char* message,
+ void Dcheck(const BranchGenerator& branch, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(const NodeGenerator<BoolT>& condition_body, const char* message,
+ void Dcheck(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(TNode<Word32T> condition_node, const char* message,
+ void Dcheck(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void Check(const BranchGenerator& branch, const char* message,
@@ -1096,7 +1099,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<ExternalString> object) {
// This is only valid for ExternalStrings where the resource data
// pointer is cached (i.e. no uncached external strings).
- CSA_ASSERT(this, Word32NotEqual(
+ CSA_DCHECK(this, Word32NotEqual(
Word32And(LoadInstanceType(object),
Int32Constant(kUncachedExternalStringMask)),
Int32Constant(kUncachedExternalStringTag)));
@@ -1107,15 +1110,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadExternalPointerFromObject(holder,
- JSTypedArray::kExternalPointerOffset,
- kTypedArrayExternalPointerTag);
+ return LoadObjectField<RawPtrT>(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreExternalPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
- value, kTypedArrayExternalPointerTag);
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(
+ holder, JSTypedArray::kExternalPointerOffset, value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1236,7 +1238,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
- CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
+ CSA_DCHECK(this, TaggedIsNotSmi(reference.object));
return CAST(
LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
@@ -1270,7 +1272,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
- CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
+ CSA_DCHECK(this, TaggedIsNotSmi(reference.object));
StoreToObject(rep, reference.object, offset, value, write_barrier);
}
template <class T, typename std::enable_if<
@@ -1448,40 +1450,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array, typename TIndex, typename TValue = MaybeObject>
- TNode<TValue> LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index,
- int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<TValue> LoadArrayElement(TNode<Array> array, int array_header_size,
+ TNode<TIndex> index,
+ int additional_offset = 0);
template <typename TIndex>
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
CheckBounds check_bounds = CheckBounds::kAlways);
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, index, additional_offset,
- needs_poisoning, CheckBounds::kDebugOnly);
+ CheckBounds::kDebugOnly);
}
- TNode<Object> LoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning);
+ additional_offset);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning,
- CheckBounds::kDebugOnly);
+ additional_offset, CheckBounds::kDebugOnly);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
@@ -1814,17 +1811,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags);
// Allocate a ByteArray with the given length.
- TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
- AllocationFlags flags = kNone);
+ TNode<ByteArray> AllocateByteArray(
+ TNode<UintPtrT> length, AllocationFlags flags = AllocationFlag::kNone);
// Allocate a SeqOneByteString with the given length.
- TNode<String> AllocateSeqOneByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqOneByteString;
// Allocate a SeqTwoByteString with the given length.
- TNode<String> AllocateSeqTwoByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqTwoByteString;
// Allocate a SlicedOneByteString with the given length, parent and offset.
@@ -1841,9 +1838,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
+ TNode<IntPtrT> at_least_space_for,
+ AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity, AllocationFlags = kNone);
+ TNode<IntPtrT> capacity, AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
@@ -1861,7 +1859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Map> map,
base::Optional<TNode<HeapObject>> properties = base::nullopt,
base::Optional<TNode<FixedArray>> elements = base::nullopt,
- AllocationFlags flags = kNone,
+ AllocationFlags flags = AllocationFlag::kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
@@ -1886,30 +1884,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
base::Optional<TNode<AllocationSite>> allocation_site,
- TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
+ TNode<IntPtrT> capacity,
+ AllocationFlags allocation_flags = AllocationFlag::kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone);
+ AllocationFlags allocation_flags = AllocationFlag::kNone);
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
base::nullopt, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
@@ -1942,7 +1943,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename TIndex>
TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, TNode<TIndex> capacity, AllocationFlags flags = kNone,
+ ElementsKind kind, TNode<TIndex> capacity,
+ AllocationFlags flags = AllocationFlag::kNone,
base::Optional<TNode<Map>> fixed_array_map = base::nullopt);
TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
@@ -2138,7 +2140,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
kFixedArrays = 1,
kFixedDoubleArrays = 2,
kDontCopyCOW = 4,
- kNewSpaceAllocationOnly = 8,
kAllFixedArrays = kFixedArrays | kFixedDoubleArrays,
kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
};
@@ -2628,6 +2629,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsHoleyFastElementsKindForRead(TNode<Int32T> elements_kind);
TNode<BoolT> IsElementsKindGreaterThan(TNode<Int32T> target_kind,
ElementsKind reference_kind);
+ TNode<BoolT> IsElementsKindGreaterThanOrEqual(TNode<Int32T> target_kind,
+ ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
ElementsKind reference_kind);
// Check if lower_reference_kind <= target_kind <= higher_reference_kind.
@@ -2887,9 +2890,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Call non-allocating runtime String::WriteToFlat using fast C-calls.
void StringWriteToFlatOneByte(TNode<String> source, TNode<RawPtrT> sink,
- TNode<Int32T> from, TNode<Int32T> to);
+ TNode<Int32T> start, TNode<Int32T> length);
void StringWriteToFlatTwoByte(TNode<String> source, TNode<RawPtrT> sink,
- TNode<Int32T> from, TNode<Int32T> to);
+ TNode<Int32T> start, TNode<Int32T> length);
// Calls External{One,Two}ByteString::GetChars with a fast C-call.
TNode<RawPtr<Uint8T>> ExternalOneByteStringGetChars(
@@ -3569,9 +3572,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
TNode<JSArrayBuffer> buffer);
- void IsTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
- Label* detached_or_oob,
- Label* not_detached_nor_oob);
+ void IsJSTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
+ Label* detached_or_oob,
+ Label* not_detached_nor_oob);
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
@@ -3647,8 +3650,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
- TorqueStructArguments GetFrameArguments(TNode<RawPtrT> frame,
- TNode<IntPtrT> argc);
+ enum class FrameArgumentsArgcType {
+ kCountIncludesReceiver,
+ kCountExcludesReceiver
+ };
+
+ TorqueStructArguments GetFrameArguments(
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type =
+ FrameArgumentsArgcType::kCountExcludesReceiver);
+
+ inline TNode<Int32T> JSParameterCount(TNode<Int32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
+ inline TNode<Word32T> JSParameterCount(TNode<Word32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
// Support for printf-style debugging
void Print(const char* s);
@@ -4054,7 +4077,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Number>* var_result,
Label* if_bailout);
- void AssertHasValidMap(TNode<HeapObject> object);
+ void DcheckHasValidMap(TNode<HeapObject> object);
template <typename TValue>
void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
@@ -4086,7 +4109,7 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- argc_(torque_arguments.length),
+ argc_(torque_arguments.actual_count),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
@@ -4104,12 +4127,12 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
TNode<Object> AtIndex(int index) const;
// Return the number of arguments (excluding the receiver).
- TNode<IntPtrT> GetLength() const { return argc_; }
+ TNode<IntPtrT> GetLengthWithoutReceiver() const;
// Return the number of arguments (including the receiver).
TNode<IntPtrT> GetLengthWithReceiver() const;
TorqueStructArguments GetTorqueArguments() const {
- return TorqueStructArguments{fp_, base_, argc_};
+ return TorqueStructArguments{fp_, base_, GetLengthWithoutReceiver(), argc_};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
diff --git a/chromium/v8/src/codegen/compiler.cc b/chromium/v8/src/codegen/compiler.cc
index 4fd70a8d9e1..9fab1cd40f2 100644
--- a/chromium/v8/src/codegen/compiler.cc
+++ b/chromium/v8/src/codegen/compiler.cc
@@ -1064,8 +1064,8 @@ Handle<Code> ContinuationForConcurrentOptimization(
function->set_code(function->feedback_vector().optimized_code());
}
return handle(function->code(), isolate);
- } else if (function->shared().HasBaselineData()) {
- Code baseline_code = function->shared().baseline_data().baseline_code();
+ } else if (function->shared().HasBaselineCode()) {
+ Code baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return handle(baseline_code, isolate);
}
@@ -1179,9 +1179,13 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
isolate->concurrent_recompilation_enabled() &&
mode == ConcurrencyMode::kNotConcurrent &&
isolate->node_observer() == nullptr);
+ GetOptimizedCodeResultHandling result_handling =
+ FLAG_stress_concurrent_inlining_attach_code
+ ? GetOptimizedCodeResultHandling::kDefault
+ : GetOptimizedCodeResultHandling::kDiscardForTesting;
USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
code_kind, BytecodeOffset::None(), nullptr,
- GetOptimizedCodeResultHandling::kDiscardForTesting));
+ result_handling));
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1308,6 +1312,7 @@ void FinalizeUnoptimizedScriptCompilation(
void CompileAllWithBaseline(Isolate* isolate,
const FinalizeUnoptimizedCompilationDataList&
finalize_unoptimized_compilation_data_list) {
+ CodePageCollectionMemoryModificationScope code_allocation(isolate->heap());
for (const auto& finalize_data : finalize_unoptimized_compilation_data_list) {
Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
IsCompiledScope is_compiled_scope(*shared_info, isolate);
@@ -1975,7 +1980,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
DCHECK(is_compiled_scope->is_compiled());
// Early return for already baseline-compiled functions.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
// Check if we actually can compile with baseline.
if (!CanCompileWithBaseline(isolate, *shared)) return false;
@@ -1998,12 +2003,8 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
+ shared->set_baseline_code(*code, kReleaseStore);
- Handle<HeapObject> function_data =
- handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
- Handle<BaselineData> baseline_data =
- isolate->factory()->NewBaselineData(code, function_data);
- shared->set_baseline_data(*baseline_data);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
@@ -2035,7 +2036,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- Code baseline_code = shared->baseline_data().baseline_code(isolate);
+ Code baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
@@ -2210,7 +2211,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// position, but store it as negative value for lazy translation.
StackTraceFrameIterator it(isolate);
if (!it.done() && it.is_javascript()) {
- FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
+ FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
script->set_origin_options(OriginOptionsForEval(*summary.script()));
@@ -2830,13 +2831,10 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
return maybe_result;
}
-} // namespace
-
-// static
-MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ AlignedCachedData* cached_data, BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
@@ -2844,9 +2842,12 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
DCHECK_NULL(cached_data);
+ DCHECK_NULL(deserialize_task);
} else {
- DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
- DCHECK(cached_data);
+ DCHECK_EQ(compile_options, ScriptCompiler::kConsumeCodeCache);
+ // Have to have exactly one of cached_data or deserialize_task.
+ DCHECK(cached_data || deserialize_task);
+ DCHECK(!(cached_data && deserialize_task));
DCHECK_NULL(extension);
}
int source_length = source->length();
@@ -2882,17 +2883,26 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
- Handle<SharedFunctionInfo> inner_result;
- if (CodeSerializer::Deserialize(isolate, cached_data, source,
- script_details.origin_options)
- .ToHandle(&inner_result) &&
- inner_result->is_compiled()) {
- // Promote to per-isolate compilation cache.
- is_compiled_scope = inner_result->is_compiled_scope(isolate);
- DCHECK(is_compiled_scope.is_compiled());
- compilation_cache->PutScript(source, language_mode, inner_result);
- maybe_result = inner_result;
+ if (deserialize_task) {
+ // If there's a cache consume task, finish it.
+ maybe_result = deserialize_task->Finish(isolate, source,
+ script_details.origin_options);
} else {
+ maybe_result = CodeSerializer::Deserialize(
+ isolate, cached_data, source, script_details.origin_options);
+ }
+
+ bool consuming_code_cache_succeeded = false;
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ is_compiled_scope = result->is_compiled_scope(isolate);
+ if (is_compiled_scope.is_compiled()) {
+ consuming_code_cache_succeeded = true;
+ // Promote to per-isolate compilation cache.
+ compilation_cache->PutScript(source, language_mode, result);
+ }
+ }
+ if (!consuming_code_cache_succeeded) {
// Deserializer failed. Fall through to compile.
compile_timer.set_consuming_code_cache_failed();
}
@@ -2937,6 +2947,51 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, v8::Extension* extension,
+ ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, extension, nullptr, nullptr,
+ compile_options, ScriptCompiler::kNoCacheBecauseV8Extension, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, cached_data, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, deserialize_task,
+ compile_options, no_cache_reason, natives);
+}
+
// static
MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
diff --git a/chromium/v8/src/codegen/compiler.h b/chromium/v8/src/codegen/compiler.h
index 0d1582d872a..97bd6bd027b 100644
--- a/chromium/v8/src/codegen/compiler.h
+++ b/chromium/v8/src/codegen/compiler.h
@@ -161,8 +161,39 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and serialized
+ // cached data. The cached data may be rejected, in which case this function
+ // will set cached_data->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and a task that
+ // has deserialized cached data on a background thread. The cached data from
+ // the task may be rejected, in which case this function will set
+ // deserialize_task->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -571,6 +602,8 @@ class V8_EXPORT_PRIVATE BackgroundDeserializeTask {
Handle<String> source,
ScriptOriginOptions origin_options);
+ bool rejected() const { return cached_data_.rejected(); }
+
private:
Isolate* isolate_for_local_isolate_;
AlignedCachedData cached_data_;
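
The refactored script-compilation path in compiler.cc above requires that, when consuming a code cache, exactly one of cached_data and deserialize_task is supplied. A self-contained sketch of that exactly-one-of invariant, with placeholder types standing in for the real V8 classes:

#include <cassert>
#include <cstddef>

struct CachedData {};       // placeholder for AlignedCachedData
struct DeserializeTask {};  // placeholder for BackgroundDeserializeTask

// Returns true when exactly one of the two optional inputs is present,
// mirroring the DCHECKs in the consume-code-cache path above.
bool ExactlyOneOf(const CachedData* cached_data,
                  const DeserializeTask* deserialize_task) {
  return (cached_data != nullptr) != (deserialize_task != nullptr);
}

int main() {
  CachedData data;
  DeserializeTask task;
  assert(ExactlyOneOf(&data, nullptr));
  assert(ExactlyOneOf(nullptr, &task));
  assert(!ExactlyOneOf(&data, &task));
  assert(!ExactlyOneOf(nullptr, nullptr));
  return 0;
}
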
diff --git a/chromium/v8/src/codegen/constant-pool.cc b/chromium/v8/src/codegen/constant-pool.cc
index 9af91d7a15f..510f59185c7 100644
--- a/chromium/v8/src/codegen/constant-pool.cc
+++ b/chromium/v8/src/codegen/constant-pool.cc
@@ -356,8 +356,7 @@ void ConstantPool::Emit(const ConstantPoolKey& key) {
if (assm_->IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(key.rmode())) {
int offset = assm_->pc_offset();
Assembler::EmbeddedObjectIndex index = key.value64();
- assm_->saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, index));
+ assm_->saved_handles_for_raw_object_ptr_.emplace_back(offset, index);
Handle<Object> object = assm_->GetEmbeddedObject(index);
assm_->dq(object->ptr());
DCHECK(assm_->EmbeddedObjectMatches(offset, object, index));
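
The constant-pool change above swaps push_back(std::make_pair(...)) for emplace_back, which forwards the arguments and constructs the pair in place rather than via a temporary. A tiny illustrative comparison:

#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, long>> v;
  // Constructs a temporary pair, then copies/moves it into the vector.
  v.push_back(std::make_pair(1, 2L));
  // Forwards the arguments and constructs the pair directly in place.
  v.emplace_back(3, 4L);
  return v.size() == 2 ? 0 : 1;
}
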
diff --git a/chromium/v8/src/codegen/constants-arch.h b/chromium/v8/src/codegen/constants-arch.h
index 2417be5d4dc..7eb32bafde4 100644
--- a/chromium/v8/src/codegen/constants-arch.h
+++ b/chromium/v8/src/codegen/constants-arch.h
@@ -15,6 +15,8 @@
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
diff --git a/chromium/v8/src/codegen/cpu-features.h b/chromium/v8/src/codegen/cpu-features.h
index ab6608679f9..3cdae6d4c8a 100644
--- a/chromium/v8/src/codegen/cpu-features.h
+++ b/chromium/v8/src/codegen/cpu-features.h
@@ -51,6 +51,9 @@ enum CpuFeature {
MIPSr6,
MIPS_SIMD, // MSA instructions
+#elif V8_TARGET_ARCH_LOONG64
+ FPU,
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
PPC_6_PLUS,
PPC_7_PLUS,
diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc
index e1d8c5d96ef..8f42fa7f509 100644
--- a/chromium/v8/src/codegen/external-reference.cc
+++ b/chromium/v8/src/codegen/external-reference.cc
@@ -7,9 +7,11 @@
#include "src/api/api.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
+#include "src/common/globals.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-utils.h"
#include "src/execution/isolate.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/simulator-base.h"
@@ -145,6 +147,19 @@ constexpr struct alignas(16) {
} wasm_uint32_max_as_double = {uint64_t{0x41efffffffe00000},
uint64_t{0x41efffffffe00000}};
+// This is 2147483648.0, which is 1 more than INT32_MAX.
+constexpr struct alignas(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} wasm_int32_overflow_as_float = {
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+};
+
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@@ -175,8 +190,8 @@ ExternalReference ExternalReference::Create(const Runtime::Function* f) {
}
// static
-ExternalReference ExternalReference::Create(Address address) {
- return ExternalReference(Redirect(address));
+ExternalReference ExternalReference::Create(Address address, Type type) {
+ return ExternalReference(Redirect(address, type));
}
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
@@ -400,6 +415,7 @@ IF_WASM(FUNCTION_REFERENCE, wasm_memory_fill, wasm::memory_fill_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
wasm::call_trap_callback_for_testing)
+IF_WASM(FUNCTION_REFERENCE, wasm_array_copy, wasm::array_copy_wrapper)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
@@ -618,6 +634,11 @@ ExternalReference ExternalReference::address_of_wasm_uint32_max_as_double() {
reinterpret_cast<Address>(&wasm_uint32_max_as_double));
}
+ExternalReference ExternalReference::address_of_wasm_int32_overflow_as_float() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_int32_overflow_as_float));
+}
+
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
@@ -688,6 +709,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
+#elif V8_TARGET_ARCH_LOONG64
+#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#elif V8_TARGET_ARCH_RISCV64
@@ -738,6 +761,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
isolate->regexp_stack()->memory_top_address_address());
}
+ExternalReference ExternalReference::address_of_regexp_stack_stack_pointer(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->stack_pointer_address());
+}
+
ExternalReference ExternalReference::javascript_execution_assert(
Isolate* isolate) {
return ExternalReference(isolate->javascript_execution_assert_address());
@@ -861,35 +889,37 @@ ExternalReference ExternalReference::search_string_raw_two_two() {
namespace {
-void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t from,
- int32_t to) {
- return String::WriteToFlat<uint8_t>(String::cast(Object(source)), sink, from,
- to);
+void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t start,
+ int32_t length) {
+ return String::WriteToFlat<uint8_t>(String::cast(Object(source)), sink, start,
+ length);
}
-void StringWriteToFlatTwoByte(Address source, uint16_t* sink, int32_t from,
- int32_t to) {
- return String::WriteToFlat<uint16_t>(String::cast(Object(source)), sink, from,
- to);
+void StringWriteToFlatTwoByte(Address source, uint16_t* sink, int32_t start,
+ int32_t length) {
+ return String::WriteToFlat<uint16_t>(String::cast(Object(source)), sink,
+ start, length);
}
const uint8_t* ExternalOneByteStringGetChars(Address string) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseFromOnHeapAddress(string);
// The following CHECK is a workaround to prevent a CFI bug where
// ExternalOneByteStringGetChars() and ExternalTwoByteStringGetChars() are
// merged by the linker, resulting in one of the input types' vtable addresses
// failing the address range check.
// TODO(chromium:1160961): Consider removing the CHECK when CFI is fixed.
- CHECK(Object(string).IsExternalOneByteString());
- return ExternalOneByteString::cast(Object(string)).GetChars();
+ CHECK(Object(string).IsExternalOneByteString(cage_base));
+ return ExternalOneByteString::cast(Object(string)).GetChars(cage_base);
}
const uint16_t* ExternalTwoByteStringGetChars(Address string) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseFromOnHeapAddress(string);
// The following CHECK is a workaround to prevent a CFI bug where
// ExternalOneByteStringGetChars() and ExternalTwoByteStringGetChars() are
// merged by the linker, resulting in one of the input types' vtable addresses
// failing the address range check.
// TODO(chromium:1160961): Consider removing the CHECK when CFI is fixed.
- CHECK(Object(string).IsExternalTwoByteString());
- return ExternalTwoByteString::cast(Object(string)).GetChars();
+ CHECK(Object(string).IsExternalTwoByteString(cage_base));
+ return ExternalTwoByteString::cast(Object(string)).GetChars(cage_base);
}
} // namespace
@@ -1180,7 +1210,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
-// purpose to keep the function signatures the same accross stores. The
+// purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
@@ -1218,6 +1248,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif // V8_TARGET_ARCH_X64
}
+// Same as above, for sequentially consistent stores.
+void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
+ static_cast<base::Atomic8>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
+ static_cast<base::Atomic16>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
+ static_cast<base::Atomic32>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
+ static_cast<base::Atomic64>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
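// Illustrative sketch, not part of the patch: each helper re-performs a store
// the generated code already did, with the matching memory order, so TSAN can
// observe it. A plain C++ analogue of the 32-bit variant (assuming a
// GCC/Clang-style __atomic builtin) might look like:
static void TsanSeqCstStore32(void* addr, int64_t value) {
  // Only the low 32 bits are stored; the wide parameter keeps one signature
  // for every store width.
  __atomic_store_n(static_cast<uint32_t*>(addr), static_cast<uint32_t>(value),
                   __ATOMIC_SEQ_CST);
}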
+
+// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
@@ -1245,6 +1313,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
+ tsan_seq_cst_store_8_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
+ tsan_seq_cst_store_16_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
+ tsan_seq_cst_store_32_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
+ tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
diff --git a/chromium/v8/src/codegen/external-reference.h b/chromium/v8/src/codegen/external-reference.h
index cbc34638413..fe80bef62ad 100644
--- a/chromium/v8/src/codegen/external-reference.h
+++ b/chromium/v8/src/codegen/external-reference.h
@@ -72,6 +72,8 @@ class StatsCounter;
"RegExpStack::limit_address_address()") \
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
+ V(address_of_regexp_stack_stack_pointer, \
+ "RegExpStack::stack_pointer_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
V(thread_in_wasm_flag_address_address, \
"Isolate::thread_in_wasm_flag_address_address") \
@@ -111,13 +113,6 @@ class StatsCounter;
V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
- V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
- V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
- V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
- V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
- V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
- V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
- V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
V(baseline_pc_for_next_executed_bytecode, \
"BaselinePCForNextExecutedBytecode") \
@@ -247,12 +242,21 @@ class StatsCounter;
IF_WASM(V, wasm_memory_init, "wasm::memory_init") \
IF_WASM(V, wasm_memory_copy, "wasm::memory_copy") \
IF_WASM(V, wasm_memory_fill, "wasm::memory_fill") \
+ IF_WASM(V, wasm_array_copy, "wasm::array_copy") \
+ V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
+ V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
+ V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
+ V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
+ V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
+ V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
+ V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(address_of_wasm_f64x2_convert_low_i32x4_u_int_mask, \
"wasm_f64x2_convert_low_i32x4_u_int_mask") \
V(supports_wasm_simd_128_address, "wasm::supports_wasm_simd_128_address") \
V(address_of_wasm_double_2_power_52, "wasm_double_2_power_52") \
V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
+ V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
@@ -274,6 +278,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
+ "tsan_seq_cst_store_function_8_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
+ "tsan_seq_cst_store_function_16_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
+ "tsan_seq_cst_store_function_32_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
+ "tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
@@ -318,6 +330,10 @@ class ExternalReference {
// ObjectPair f(v8::internal::Arguments).
BUILTIN_CALL_PAIR,
+ // TODO(mslekova): Once FAST_C_CALL is supported in the simulator,
+ // the following four specific types and their special handling
+ // can be removed, as the generic call supports them.
+
// Builtin that takes float arguments and returns an int.
// int f(double, double).
BUILTIN_COMPARE_CALL,
@@ -349,7 +365,11 @@ class ExternalReference {
// Call to accessor getter callback via InvokeAccessorGetterCallback.
// void f(Local<Name> property, PropertyCallbackInfo& info,
// AccessorNameGetterCallback callback)
- PROFILING_GETTER_CALL
+ PROFILING_GETTER_CALL,
+
+ // C call, either representing a fast API call or used in tests.
+ // Can have arbitrary signature from the types supported by the fast API.
+ FAST_C_CALL
};
#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
@@ -370,7 +390,8 @@ class ExternalReference {
static ExternalReference Create(const Runtime::Function* f);
static ExternalReference Create(IsolateAddressId id, Isolate* isolate);
static ExternalReference Create(Runtime::FunctionId id);
- static V8_EXPORT_PRIVATE ExternalReference Create(Address address);
+ static V8_EXPORT_PRIVATE ExternalReference
+ Create(Address address, Type type = ExternalReference::BUILTIN_CALL);
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw();
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h b/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h
index f4ff4914fb2..18aa39461d6 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -86,7 +86,7 @@ HeapObject RelocInfo::target_object() {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.cc b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
index 90f8e8b70cf..b5bbcee83f5 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
@@ -291,6 +291,8 @@ Register Operand::reg() const {
return Register::from_code(buf_[0] & 0x07);
}
+bool operator!=(Operand op, XMMRegister r) { return !op.is_reg(r); }
+
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
@@ -688,6 +690,14 @@ void Assembler::movq(XMMRegister dst, Operand src) {
emit_operand(dst, src);
}
+void Assembler::movq(Operand dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD6);
+ emit_operand(src, dst);
+}
+
void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
@@ -2178,21 +2188,6 @@ void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5B);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -2201,13 +2196,6 @@ void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtps2pd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtpd2ps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2232,59 +2220,6 @@ void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::addsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::mulsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::subsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::divsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::rcpps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x53);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::sqrtps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::rsqrtps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x52);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2302,14 +2237,6 @@ void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) {
EMIT(cmp);
}
-void Assembler::sqrtsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
void Assembler::haddps(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
@@ -2398,22 +2325,6 @@ void Assembler::pmovmskb(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::maxsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5F);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::minsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5D);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2959,16 +2870,12 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
-}
-
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
}
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(op, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
@@ -2983,27 +2890,27 @@ void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2,
}
void Assembler::vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlps(Operand dst, XMMRegister src) {
- vinstr(0x13, src, xmm0, dst, kNone, k0F, kWIG);
+ vinstr(0x13, src, xmm0, dst, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovhps(Operand dst, XMMRegister src) {
- vinstr(0x17, src, xmm0, dst, kNone, k0F, kWIG);
+ vinstr(0x17, src, xmm0, dst, kNoPrefix, k0F, kWIG);
}
void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
@@ -3157,6 +3064,16 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+void Assembler::vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
void Assembler::vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x08, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
@@ -3177,7 +3094,7 @@ void Assembler::vmovmskpd(Register dst, XMMRegister src) {
void Assembler::vmovmskps(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(xmm0, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(xmm0, kL128, kNoPrefix, k0F, kWIG);
EMIT(0x50);
emit_sse_operand(dst, src);
}
@@ -3202,7 +3119,7 @@ void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(vreg, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(vreg, kLZ, kNoPrefix, k0F38, kW0);
EMIT(op);
emit_operand(reg, rm);
}
@@ -3254,6 +3171,14 @@ void Assembler::rorx(Register dst, Operand src, byte imm8) {
EMIT(imm8);
}
+void Assembler::sse_instr(XMMRegister dst, Operand src, byte escape,
+ byte opcode) {
+ EnsureSpace ensure_space(this);
+ EMIT(escape);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
@@ -3461,27 +3386,27 @@ void Assembler::emit_operand(XMMRegister reg, Operand adr) {
void Assembler::emit_operand(int code, Operand adr) {
// Isolate-independent code may not embed relocatable addresses.
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::CODE_TARGET);
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::FULL_EMBEDDED_OBJECT);
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
-
- const unsigned length = adr.len_;
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::CODE_TARGET);
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::FULL_EMBEDDED_OBJECT);
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::EXTERNAL_REFERENCE);
+
+ const unsigned length = adr.encoded_bytes().length();
DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
- EMIT((adr.buf_[0] & ~0x38) | (code << 3));
+ EMIT((adr.encoded_bytes()[0] & ~0x38) | (code << 3));
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]);
+ for (unsigned i = 1; i < length; i++) EMIT(adr.encoded_bytes()[i]);
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode())) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
+ RecordRelocInfo(adr.rmode());
+ if (adr.rmode() == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
emit_label(ReadUnalignedValue<Label*>(reinterpret_cast<Address>(pc_)));
} else {
pc_ += sizeof(int32_t);
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.h b/chromium/v8/src/codegen/ia32/assembler-ia32.h
index 89a65ee99bf..b099dfcdd3e 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.h
@@ -269,6 +269,9 @@ class V8_EXPORT_PRIVATE Operand {
// register.
Register reg() const;
+ base::Vector<const byte> encoded_bytes() const { return {buf_, len_}; }
+ RelocInfo::Mode rmode() { return rmode_; }
+
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -298,14 +301,13 @@ class V8_EXPORT_PRIVATE Operand {
uint8_t len_ = 0;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
-
- // TODO(clemensb): Get rid of this friendship, or make Operand immutable.
- friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
+bool operator!=(Operand op, XMMRegister r);
+
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
// may be used together with a Label in order to refer to a yet unknown code
@@ -535,6 +537,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
+ void movq(Operand dst, XMMRegister src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
@@ -896,12 +899,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
void minss(XMMRegister dst, Operand src);
- void rcpps(XMMRegister dst, Operand src);
- void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
- void sqrtps(XMMRegister dst, Operand src);
- void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
- void rsqrtps(XMMRegister dst, Operand src);
- void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
void sqrtpd(XMMRegister dst, Operand src) {
@@ -958,16 +955,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtss2sd(XMMRegister dst, XMMRegister src) {
cvtss2sd(dst, Operand(src));
}
- void cvtsd2ss(XMMRegister dst, Operand src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src) {
- cvtsd2ss(dst, Operand(src));
- }
- void cvtdq2ps(XMMRegister dst, XMMRegister src) {
- cvtdq2ps(dst, Operand(src));
- }
- void cvtdq2ps(XMMRegister dst, Operand src);
void cvtdq2pd(XMMRegister dst, XMMRegister src);
- void cvtps2pd(XMMRegister dst, XMMRegister src);
void cvtpd2ps(XMMRegister dst, XMMRegister src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
@@ -975,17 +963,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvttps2dq(XMMRegister dst, Operand src);
void cvttpd2dq(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
- void addsd(XMMRegister dst, Operand src);
- void subsd(XMMRegister dst, XMMRegister src) { subsd(dst, Operand(src)); }
- void subsd(XMMRegister dst, Operand src);
- void mulsd(XMMRegister dst, XMMRegister src) { mulsd(dst, Operand(src)); }
- void mulsd(XMMRegister dst, Operand src);
- void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
- void divsd(XMMRegister dst, Operand src);
- void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
- void sqrtsd(XMMRegister dst, Operand src);
-
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, Operand src);
@@ -1007,11 +984,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
- void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
- void maxsd(XMMRegister dst, Operand src);
- void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
- void minsd(XMMRegister dst, Operand src);
-
void movdqa(XMMRegister dst, Operand src);
void movdqa(Operand dst, XMMRegister src);
void movdqa(XMMRegister dst, XMMRegister src);
@@ -1263,50 +1235,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vaddsd(dst, src1, Operand(src2));
- }
- void vaddsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x58, dst, src1, src2);
- }
- void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsubsd(dst, src1, Operand(src2));
- }
- void vsubsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5c, dst, src1, src2);
- }
- void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vmulsd(dst, src1, Operand(src2));
- }
- void vmulsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x59, dst, src1, src2);
- }
- void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vdivsd(dst, src1, Operand(src2));
- }
- void vdivsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5e, dst, src1, src2);
- }
- void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vmaxsd(dst, src1, Operand(src2));
- }
- void vmaxsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5f, dst, src1, src2);
- }
- void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vminsd(dst, src1, Operand(src2));
- }
- void vminsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5d, dst, src1, src2);
- }
- void vsqrtsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsqrtsd(dst, src1, Operand(src2));
- }
- void vsqrtsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x51, dst, src1, src2);
- }
- void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
@@ -1351,20 +1279,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
- void vrcpps(XMMRegister dst, Operand src) {
- vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
- }
- void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); }
- void vsqrtps(XMMRegister dst, Operand src) {
- vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG);
- }
- void vrsqrtps(XMMRegister dst, XMMRegister src) {
- vrsqrtps(dst, Operand(src));
- }
- void vrsqrtps(XMMRegister dst, Operand src) {
- vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vhaddps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vhaddps(dst, src1, Operand(src2));
}
@@ -1498,21 +1412,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
+ void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode);
+ void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode);
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
- vcvtdq2ps(dst, Operand(src));
- }
- void vcvtdq2ps(XMMRegister dst, Operand src) {
- vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vcvtps2pd(XMMRegister dst, XMMRegister src) {
- vinstr(0x5A, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vcvtpd2ps(XMMRegister dst, XMMRegister src) {
vinstr(0x5A, dst, xmm0, src, k66, k0F, kWIG);
}
@@ -1525,6 +1434,28 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvttpd2dq(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vcvttsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ }
+ void vcvttsd2si(Register dst, Operand src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ }
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ }
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ }
+ void vcvttss2si(Register dst, XMMRegister src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
+ void vcvttss2si(Register dst, Operand src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
void vmovddup(XMMRegister dst, Operand src) {
vinstr(0x12, dst, xmm0, src, kF2, k0F, kWIG);
@@ -1544,6 +1475,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovdqa(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vmovdqa(XMMRegister dst, XMMRegister src) {
+ vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1564,6 +1498,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vpmovmskb(Register dst, XMMRegister src);
+ void vucomisd(XMMRegister dst, XMMRegister src) {
+ vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vucomisd(XMMRegister dst, Operand src) {
+ vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vucomiss(XMMRegister dst, XMMRegister src) {
+ vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
+ }
+ void vucomiss(XMMRegister dst, Operand src) {
+ vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
+ }
+
// BMI instruction
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
@@ -1596,7 +1543,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bzhi(dst, Operand(src1), src2);
}
void bzhi(Register dst, Operand src1, Register src2) {
- bmi2(kNone, 0xf5, dst, src2, src1);
+ bmi2(kNoPrefix, 0xf5, dst, src2, src1);
}
void mulx(Register dst1, Register dst2, Register src) {
mulx(dst1, dst2, Operand(src));
@@ -1709,10 +1656,29 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
PACKED_CMP_LIST(AVX_CMP_P)
+ // vcmpgeps/vcmpgepd only in AVX.
+ AVX_CMP_P(cmpge, 0xd)
#undef AVX_CMP_P
#undef PACKED_CMP_LIST
// Other SSE and AVX instructions
+#define DECLARE_SSE_UNOP_AND_AVX(instruction, escape, opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ instruction(dst, Operand(src)); \
+ } \
+ void instruction(XMMRegister dst, Operand src) { \
+ sse_instr(dst, src, 0x##escape, 0x##opcode); \
+ } \
+ void v##instruction(XMMRegister dst, XMMRegister src) { \
+ v##instruction(dst, Operand(src)); \
+ } \
+ void v##instruction(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, kNoPrefix, k##escape, kWIG); \
+ }
+
+ SSE_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AND_AVX)
+#undef DECLARE_SSE_UNOP_AND_AVX
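  // Illustrative expansion, not part of the patch: for a list entry such as
  // (sqrtps, 0F, 51) -- matching the hand-written versions removed above --
  // the macro would generate roughly:
  //   void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
  //   void sqrtps(XMMRegister dst, Operand src) { sse_instr(dst, src, 0x0F, 0x51); }
  //   void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); }
  //   void vsqrtps(XMMRegister dst, Operand src) {
  //     vinstr(0x51, dst, xmm0, src, kNoPrefix, k0F, kWIG);
  //   }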
+
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
@@ -1722,6 +1688,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
+ SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_INSTRUCTION)
#undef DECLARE_SSE2_INSTRUCTION
#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
@@ -1733,6 +1700,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
+ SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_AVX_INSTRUCTION)
#undef DECLARE_SSE2_AVX_INSTRUCTION
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
@@ -1790,6 +1758,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
+ // AVX2 instructions
+#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
+ void instr(XMMRegister dst, XMMRegister src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ } \
+ void instr(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ }
+ AVX2_BROADCAST_LIST(AVX2_INSTRUCTION)
+#undef AVX2_INSTRUCTION
+
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
@@ -1888,7 +1869,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_farith(int b1, int b2, int i);
// Emit vex prefix
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
@@ -1907,6 +1888,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit_disp(Label* L, Displacement::Type type);
inline void emit_near_disp(Label* L);
+ void sse_instr(XMMRegister dst, Operand src, byte prefix, byte opcode);
void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
index c95ea8ad2c6..e92fb3b5f7e 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -158,16 +158,23 @@ void MacroAssembler::PushRoot(RootIndex index) {
}
}
-void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit, Register scratch,
- Label* on_in_range,
- Label::Distance near_jump) {
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
if (lower_limit != 0) {
lea(scratch, Operand(value, 0u - lower_limit));
cmp(scratch, Immediate(higher_limit - lower_limit));
} else {
cmp(value, Immediate(higher_limit));
}
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch,
+ Label* on_in_range,
+ Label::Distance near_jump) {
+ CompareRange(value, lower_limit, higher_limit, scratch);
j(below_equal, on_in_range, near_jump);
}
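// Illustrative sketch, not part of the patch: CompareRange relies on the
// classic unsigned-subtract trick, so a single comparison covers the whole
// closed interval [lower_limit, higher_limit]. In plain C++:
static bool IsInRange(unsigned value, unsigned lower_limit,
                      unsigned higher_limit) {
  // Wrap-around makes any value < lower_limit compare as a huge unsigned
  // number, so one unsigned comparison checks both bounds at once.
  return (value - lower_limit) <= (higher_limit - lower_limit);
}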
@@ -199,7 +206,11 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
- // TODO(jgruber): Add support for enable_root_relative_access.
+ if (root_array_available() && options().enable_root_relative_access) {
+ intptr_t delta =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ return Operand(kRootRegister, delta);
+ }
if (root_array_available() && options().isolate_independent_code) {
if (IsAddressableThroughRootRegister(isolate(), reference)) {
// Some external references can be efficiently loaded as an offset from
@@ -631,319 +642,6 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch) {
- ASM_CODE_COMMENT(this);
- // k = i16x8.splat(0x8000)
- Pcmpeqd(scratch, scratch);
- Psllw(scratch, scratch, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(scratch, dst);
- Pxor(dst, scratch);
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp1, XMMRegister tmp2,
- Register scratch) {
- ASM_CODE_COMMENT(this);
- DCHECK_NE(dst, tmp1);
- DCHECK_NE(src, tmp1);
- DCHECK_NE(dst, tmp2);
- DCHECK_NE(src, tmp2);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
- scratch));
- vpandn(tmp2, tmp1, src);
- vpand(dst, tmp1, src);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
- scratch));
- vpsrlw(tmp2, tmp2, 4);
- vpshufb(dst, tmp1, dst);
- vpshufb(tmp2, tmp1, tmp2);
- vpaddb(dst, dst, tmp2);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp1, src);
- psrlw(tmp1, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55(), scratch));
- psubb(dst, tmp1);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
- movaps(tmp1, dst);
- andps(dst, splat_0x33);
- psrlw(tmp1, 2);
- andps(tmp1, splat_0x33);
- paddb(dst, tmp1);
- movaps(tmp1, dst);
- psrlw(dst, 4);
- paddb(dst, tmp1);
- andps(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
- if (tmp2 != tmp1) {
- movaps(tmp2, tmp1);
- }
- andps(tmp1, src);
- andnps(tmp2, src);
- psrlw(tmp2, 4);
- movaps(dst, mask);
- pshufb(dst, tmp1);
- movaps(tmp1, mask);
- pshufb(tmp1, tmp2);
- paddb(dst, tmp1);
- }
-}
-
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
- Register tmp) {
- // dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
- // 0x43300000'00000000 is a special double where the significand bits
- // precisely represents all uint32 numbers.
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
- tmp));
- Subpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
-}
-
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(scratch, src);
- dst = scratch;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
-
- if (original_dst != dst) {
- vmovaps(original_dst, dst);
- }
- } else {
- if (dst != src) {
- movaps(dst, src);
- }
- movaps(scratch, dst);
- cmpeqpd(scratch, dst);
- andps(scratch,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- minpd(dst, scratch);
- cvttpd2dq(dst, dst);
- }
-}
-
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(scratch, scratch, scratch);
- // Saturate to 0.
- vmaxpd(dst, src, scratch);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significant bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, scratch, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- movaps(dst, src);
- }
-
- xorps(scratch, scratch);
- maxpd(dst, scratch);
- minpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- shufps(dst, scratch, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp,
- Register scratch) {
- // pmaddubsw treats the first operand as unsigned, so pass the external
- // reference to as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, op);
- vpmaddubsw(dst, tmp, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst == src) {
- movaps(tmp, op);
- pmaddubsw(tmp, src);
- movaps(dst, tmp);
- } else {
- movaps(dst, op);
- pmaddubsw(dst, src);
- }
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src, op);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(dst, src);
- pmaddubsw(dst, op);
- }
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
- // pmaddwd multiplies signed words in src and op, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Pmaddwd(dst, src, op);
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // scratch = |0|a|0|c|0|e|0|g|
- vpsrld(tmp, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, tmp, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, tmp, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(tmp, src);
- psrld(tmp, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, tmp, 0xAA);
- paddd(dst, tmp);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // tmp = i32x4.splat(0x0000FFFF)
- pcmpeqd(tmp, tmp);
- psrld(tmp, byte{16});
- // tmp =|0|b|0|d|0|f|0|h|
- andps(tmp, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, tmp);
- }
-}
-
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, XMMRegister scratch,
- Register tmp, bool omit_add) {
- if (omit_add) {
- Pshufb(dst, src, mask);
- return;
- }
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(scratch, mask, op);
- vpshufb(dst, src, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(scratch, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(scratch, mask);
- pshufb(dst, scratch);
- }
-}
-
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -1032,14 +730,15 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
+void MacroAssembler::CmpInstanceTypeRange(Register map,
+ Register instance_type_out,
+ Register scratch,
InstanceType lower_limit,
InstanceType higher_limit) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
- movzx_w(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- lea(scratch, Operand(scratch, 0u - lower_limit));
- cmp(scratch, Immediate(higher_limit - lower_limit));
+ movzx_w(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
+ CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}
void MacroAssembler::AssertSmi(Register object) {
@@ -1071,7 +770,7 @@ void MacroAssembler::AssertFunction(Register object, Register scratch) {
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
- CmpInstanceTypeRange(object, scratch, FIRST_JS_FUNCTION_TYPE,
+ CmpInstanceTypeRange(object, scratch, scratch, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
@@ -1265,7 +964,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
bind(&check_offset);
cmp(bytes_scratch, kStackPageSize);
- j(greater, &touch_next_page);
+ j(greater_equal, &touch_next_page);
sub(esp, bytes_scratch);
}
@@ -1273,7 +972,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
- while (bytes > kStackPageSize) {
+ while (bytes >= kStackPageSize) {
sub(esp, Immediate(kStackPageSize));
mov(Operand(esp, 0), Immediate(0));
bytes -= kStackPageSize;
@@ -1556,8 +1255,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kFar);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1584,8 +1285,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lea(scratch,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(scratch);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words = type == InvokeType::kCall ? 0 : 1;
+ if (!kJSArgcIncludesReceiver) extra_words++;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
@@ -1620,8 +1323,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
@@ -1633,7 +1336,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1895,37 +1599,12 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst, src, mask);
- return;
- }
-
- // Make sure these are different so that we won't overwrite mask.
- DCHECK(!mask.is_reg(dst));
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst != src) {
- movaps(dst, src);
- }
- pshufb(dst, mask);
-}
-
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+ uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrd(dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrd(dst, src, imm8);
- return;
- }
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
@@ -1936,43 +1615,8 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- Pinsrb(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, src1, src2, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pinsrb(dst, src2, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, src1, src2, imm8);
- return;
- }
- if (dst != src1) {
- movaps(dst, src1);
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src2, imm8);
- return;
- }
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
@@ -1981,10 +1625,10 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
// Write original content of {dst} to the stack.
movsd(Operand(esp, 0), dst);
// Overwrite the portion specified in {imm8}.
- if (src2.is_reg_only()) {
- mov(Operand(esp, imm8 * kUInt32Size), src2.reg());
+ if (src.is_reg_only()) {
+ mov(Operand(esp, imm8 * kUInt32Size), src.reg());
} else {
- movss(dst, src2);
+ movss(dst, src);
movss(Operand(esp, imm8 * kUInt32Size), dst);
}
// Load back the full value into {dst}.
@@ -1992,39 +1636,6 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
- Pinsrd(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- Pinsrw(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, src1, src2, imm8);
- return;
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pinsrw(dst, src2, imm8);
- return;
- }
-}
-
-void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst, src);
- return;
- }
- movss(dst, src);
- shufps(dst, dst, static_cast<byte>(0));
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2145,7 +1756,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(1, eax);
mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -2158,7 +1769,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2295,8 +1906,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
- return Operand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2385,63 +1995,6 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), destination, rmode);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-}
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
index 527c3570470..ce02c0e294f 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -68,9 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -158,15 +159,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
JumpMode jump_mode = JumpMode::kJump);
void Jump(const ExternalReference& reference);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
- void RetpolineJump(Register reg);
-
void Trap();
void DebugBreak();
@@ -203,6 +199,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
SmiUntag(output);
}
+ void SmiToInt32(Register reg) { SmiUntag(reg); }
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -306,68 +304,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- // Defined here because some callers take a pointer to member functions.
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
-
-// Macro for instructions that have 2 operands for AVX version and 1 operand for
-// SSE version. Will move src1 to dst if dst != src1.
-#define AVX_OP3_WITH_MOVE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- } else { \
- if (dst != src1) { \
- movaps(dst, src1); \
- } \
- name(dst, src2); \
- } \
- }
- AVX_OP3_WITH_MOVE(Cmpeqps, cmpeqps, XMMRegister, XMMRegister)
- AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
- AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
- AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
-#undef AVX_OP3_WITH_MOVE
-
- // TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
- void Movlps(Operand dst, XMMRegister src) {
- SharedTurboAssembler::Movlps(dst, src);
- }
- void Movhps(Operand dst, XMMRegister src) {
- SharedTurboAssembler::Movhps(dst, src);
- }
-
- void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
- void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pshufb(XMMRegister dst, XMMRegister src, XMMRegister mask) {
- Pshufb(dst, src, Operand(mask));
+ void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
+ void PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41(dst, Operand(src), imm8, load_pc_offset);
}
- void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
-
- void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
- void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
- Pinsrb(dst, Operand(src), imm8);
- }
- void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
- Pinsrd(dst, Operand(src), imm8);
- }
- void Pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- Pinsrw(dst, Operand(src), imm8);
- }
- void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
- void Vbroadcastss(XMMRegister dst, Operand src);
+ void PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -395,32 +338,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on IA32. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch);
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
- XMMRegister tmp2, Register scratch);
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src, Register tmp);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp, Register scratch);
- void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp);
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- XMMRegister scratch, Register tmp, bool omit_add = false);
-
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
@@ -480,9 +397,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -521,7 +435,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags CF=1 or ZF=1 indicate the value is in the range
+ // (condition below_equal). It is valid for |value| == |scratch| as far as
+ // this function is concerned.
+ void CompareRange(Register value, unsigned lower_limit, unsigned higher_limit,
+ Register scratch);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Register scratch,
Label* on_in_range,
@@ -605,8 +523,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
//
// Always use unsigned comparisons: below_equal for a positive
// result.
- void CmpInstanceTypeRange(Register map, Register scratch,
- InstanceType lower_limit,
+ void CmpInstanceTypeRange(Register map, Register instance_type_out,
+ Register scratch, InstanceType lower_limit,
InstanceType higher_limit);
// Smi tagging support.
diff --git a/chromium/v8/src/codegen/ia32/register-ia32.h b/chromium/v8/src/codegen/ia32/register-ia32.h
index 5dc035d9669..37a5783deda 100644
--- a/chromium/v8/src/codegen/ia32/register-ia32.h
+++ b/chromium/v8/src/codegen/ia32/register-ia32.h
@@ -161,9 +161,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
constexpr Register kRootRegister = ebx;
-// TODO(860429): Remove remaining poisoning infrastructure on ia32.
-constexpr Register kSpeculationPoisonRegister = no_reg;
-
constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable.
} // namespace internal
diff --git a/chromium/v8/src/codegen/ia32/sse-instr.h b/chromium/v8/src/codegen/ia32/sse-instr.h
index d775dfdd774..ec630dfa9dd 100644
--- a/chromium/v8/src/codegen/ia32/sse-instr.h
+++ b/chromium/v8/src/codegen/ia32/sse-instr.h
@@ -5,6 +5,14 @@
#ifndef V8_CODEGEN_IA32_SSE_INSTR_H_
#define V8_CODEGEN_IA32_SSE_INSTR_H_
+// SSE/SSE2 instructions whose AVX version has two operands.
+#define SSE_UNOP_INSTRUCTION_LIST(V) \
+ V(sqrtps, 0F, 51) \
+ V(rsqrtps, 0F, 52) \
+ V(rcpps, 0F, 53) \
+ V(cvtps2pd, 0F, 5A) \
+ V(cvtdq2ps, 0F, 5B)
+
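The lists in this header are X-macros: each V(name, escape, opcode) entry is expanded by whatever macro the including code passes in as V. Below is a minimal, self-contained sketch of that pattern; the DEMO_SSE_UNOP_LIST list and the PRINT_ENTRY consumer are illustrative stand-ins (with the escape/opcode columns written as ordinary hex literals), not the expansion V8 actually performs.

#include <cstdio>

// Stand-in list mirroring the shape of SSE_UNOP_INSTRUCTION_LIST above.
#define DEMO_SSE_UNOP_LIST(V) \
  V(sqrtps, 0x0F, 0x51)       \
  V(rsqrtps, 0x0F, 0x52)      \
  V(rcpps, 0x0F, 0x53)

// One possible consumer: print a line per instruction entry.
#define PRINT_ENTRY(name, escape, opcode)                      \
  std::printf(#name ": escape=0x%02X opcode=0x%02X\n",         \
              static_cast<unsigned>(escape),                   \
              static_cast<unsigned>(opcode));

int main() {
  DEMO_SSE_UNOP_LIST(PRINT_ENTRY)  // Expands to one printf per entry.
  return 0;
}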
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
V(packssdw, 66, 0F, 6B) \
@@ -63,6 +71,17 @@
V(punpckhqdq, 66, 0F, 6D) \
V(pxor, 66, 0F, EF)
+// Instructions dealing with scalar double-precision values.
+#define SSE2_INSTRUCTION_LIST_SD(V) \
+ V(sqrtsd, F2, 0F, 51) \
+ V(addsd, F2, 0F, 58) \
+ V(mulsd, F2, 0F, 59) \
+ V(cvtsd2ss, F2, 0F, 5A) \
+ V(subsd, F2, 0F, 5C) \
+ V(minsd, F2, 0F, 5D) \
+ V(divsd, F2, 0F, 5E) \
+ V(maxsd, F2, 0F, 5F)
+
#define SSSE3_INSTRUCTION_LIST(V) \
V(pshufb, 66, 0F, 38, 00) \
V(phaddw, 66, 0F, 38, 01) \
@@ -102,4 +121,10 @@
V(pmovzxdq, 66, 0F, 38, 35) \
V(ptest, 66, 0F, 38, 17)
+// These require AVX2, and we only define the VEX-128 versions.
+#define AVX2_BROADCAST_LIST(V) \
+ V(vpbroadcastd, 66, 0F, 38, 58) \
+ V(vpbroadcastb, 66, 0F, 38, 78) \
+ V(vpbroadcastw, 66, 0F, 38, 79)
+
#endif // V8_CODEGEN_IA32_SSE_INSTR_H_
diff --git a/chromium/v8/src/codegen/interface-descriptors-inl.h b/chromium/v8/src/codegen/interface-descriptors-inl.h
index cf4ff5b0e6b..d5a8ccf6e4a 100644
--- a/chromium/v8/src/codegen/interface-descriptors-inl.h
+++ b/chromium/v8/src/codegen/interface-descriptors-inl.h
@@ -27,6 +27,8 @@
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
#else
@@ -318,9 +320,10 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_LOONG64
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -341,7 +344,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
diff --git a/chromium/v8/src/codegen/interface-descriptors.h b/chromium/v8/src/codegen/interface-descriptors.h
index cf4840bfd7f..87bef49f37a 100644
--- a/chromium/v8/src/codegen/interface-descriptors.h
+++ b/chromium/v8/src/codegen/interface-descriptors.h
@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
- IF_TSAN(V, TSANRelaxedStore) \
- IF_TSAN(V, TSANRelaxedLoad) \
+ IF_TSAN(V, TSANStore) \
+ IF_TSAN(V, TSANLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
-class TSANRelaxedStoreDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+class TSANStoreDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress
MachineType::AnyTagged()) // kValue
- DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
+ DECLARE_DESCRIPTOR(TSANStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
-class TSANRelaxedLoadDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
+class TSANLoadDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress
- DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
+ DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
diff --git a/chromium/v8/src/codegen/loong64/assembler-loong64-inl.h b/chromium/v8/src/codegen/loong64/assembler-loong64-inl.h
new file mode 100644
index 00000000000..eb7cf3d3984
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/assembler-loong64-inl.h
@@ -0,0 +1,249 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/debug/debug.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+int64_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
+ }
+}
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+Address RelocInfo::target_address_address() {
+ DCHECK(HasTargetAddressAddress());
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows the J/JAL/JR/JALR
+ // instruction.
+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
+}
+
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void Assembler::deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target) {
+ set_target_address_at(instruction_payload,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
+}
+
+int Assembler::deserialization_special_target_size(
+ Address instruction_payload) {
+ return kSpecialTargetSize;
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ WriteUnalignedValue<Address>(pc, target);
+}
+
+HeapObject RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+ }
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
+ return target_object();
+}
+
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
+ if (IsDataEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
+ } else if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
+}
+
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ WriteUnalignedValue(pc_, target.ptr());
+ // No need to flush icache since no instructions were changed.
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
+ WriteBarrierForCode(host(), this, target);
+ }
+}
+
+Address RelocInfo::target_external_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory<Address>(pc_);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return pc_;
+}
+
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr = Assembler::instr_at(pc);
+ int32_t code_target_index = instr & kImm26Mask;
+ code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
+ ((code_target_index >> 10) & kImm16Mask);
+ return GetCodeTarget(code_target_index);
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+void Assembler::EmitHelper(Instr x) {
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+ *reinterpret_cast<uint8_t*>(pc_) = x;
+ pc_ += sizeof(x);
+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+ CheckTrampolinePoolQuick();
+ }
+}
+
+void Assembler::emit(Instr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(x);
+}
+
+void Assembler::emit(uint64_t data) {
+ // CheckForEmitInForbiddenSlot();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(data);
+}
+
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
diff --git a/chromium/v8/src/codegen/loong64/assembler-loong64.cc b/chromium/v8/src/codegen/loong64/assembler-loong64.cc
new file mode 100644
index 00000000000..cc1eaa7d123
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/assembler-loong64.cc
@@ -0,0 +1,2405 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/loong64/assembler-loong64.h"
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/cpu.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/heap-number-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= 1u << FPU;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+#ifdef __loongarch__
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ supported_ |= 1u << FPU;
+#endif
+
+ // Set a static value on whether Simd is supported.
+ // This variable is only used for certain archs to query SupportsWasmSimd128()
+ // at runtime in builtins using an extern ref. Other callers should use
+ // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
+}
+
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
+
+int ToNumber(Register reg) {
+ DCHECK(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // tp
+ 3, // sp
+ 4, // a0 v0
+ 5, // a1 v1
+ 6, // a2
+ 7, // a3
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
+ 16, // t4
+ 17, // t5
+ 18, // t6
+ 19, // t7
+ 20, // t8
+ 21, // x_reg
+ 22, // fp
+ 23, // s0
+ 24, // s1
+ 25, // s2
+ 26, // s3
+ 27, // s4
+ 28, // s5
+ 29, // s6
+ 30, // s7
+ 31, // s8
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg, ra, tp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
+ t4, t5, t6, t7, t8, x_reg, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8};
+ return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on LoongArch64 means that it is a lu12i_w/ori instruction,
+ // and that is always the case inside code objects.
+ return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-loong64-inl.h for inlined constructors.
+
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
+ value_.immediate = static_cast<intptr_t>(handle.address());
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
+MemOperand::MemOperand(Register base, int32_t offset)
+ : base_(base), index_(no_reg), offset_(offset) {}
+
+MemOperand::MemOperand(Register base, Register index)
+ : base_(base), index_(index), offset_(0) {}
+
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
+ break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(t7.bit() | t6.bit()) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt
+ : kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+}
+
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
+ // EmitForbiddenSlotInstruction(); // TODO(LOONG_dev): Is this needed here?
+
+ int code_comments_size = WriteCodeComments();
+
+ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
+ // Set up code descriptor.
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
+}
+
+void Assembler::Align(int m) {
+ // If not, the loop below won't terminate.
+ DCHECK(IsAligned(pc_offset(), kInstrSize));
+ DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+void Assembler::CodeTargetAlign() {
+ // No advantage to aligning branch/call targets to more than a
+ // single instruction, as far as we are aware.
+ Align(4);
+}
+
+Register Assembler::GetRkReg(Instr instr) {
+ return Register::from_code((instr & kRkFieldMask) >> kRkShift);
+}
+
+Register Assembler::GetRjReg(Instr instr) {
+ return Register::from_code((instr & kRjFieldMask) >> kRjShift);
+}
+
+Register Assembler::GetRdReg(Instr instr) {
+ return Register::from_code((instr & kRdFieldMask) >> kRdShift);
+}
+
+uint32_t Assembler::GetRk(Instr instr) {
+ return (instr & kRkFieldMask) >> kRkShift;
+}
+
+uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; }
+
+uint32_t Assembler::GetRj(Instr instr) {
+ return (instr & kRjFieldMask) >> kRjShift;
+}
+
+uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; }
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
+
+uint32_t Assembler::GetSa2(Instr instr) {
+ return (instr & kSa2FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; }
+
+uint32_t Assembler::GetSa3(Instr instr) {
+ return (instr & kSa3FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; }
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by an offset value of 0 in the instruction,
+// which is an otherwise illegal value (a branch to offset 0 is an infinite loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in
+// code it is converted to an 18-bit value addressing bytes, hence the -4 value.
+
+const int kEndOfChain = 0;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
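For readers unfamiliar with the scheme described above, here is a minimal, self-contained model of the link chain: each unresolved branch stores the position of the previous unresolved branch (0 marks the end of the chain), and binding the label walks that chain and patches every fixup. Instruction encodings, byte offsets, the position-0/-4 special case and trampolines are deliberately ignored; this is an illustration, not V8's implementation.

#include <cstdio>
#include <map>

int main() {
  std::map<int, int> code;   // position -> stored link (0 == end of chain)
  int label_link = 0;        // label currently unused

  // Emitting a branch to an unbound label records the previous fixup position
  // in the new branch and makes the label point at the newest fixup.
  auto emit_branch_to_label = [&](int pos) {
    code[pos] = label_link;
    label_link = pos;
  };

  emit_branch_to_label(4);   // positions start at 4 so 0 stays the sentinel
  emit_branch_to_label(12);
  emit_branch_to_label(20);

  int bound_pos = 32;        // bind(): walk the chain and patch every fixup
  for (int fixup = label_link; fixup != 0;) {
    int next = code[fixup];
    code[fixup] = bound_pos; // patch this branch to the bound position
    std::printf("patched fixup at %d -> %d\n", fixup, bound_pos);
    fixup = next;
  }
  return 0;
}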
+
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ ||
+ opcode == B || opcode == BL || opcode == BEQ ||
+ opcode == BNE || opcode == BLT || opcode == BGE ||
+ opcode == BLTU || opcode == BGEU;
+ return isBranch;
+}
+
+bool Assembler::IsB(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a b.
+ bool isBranch = opcode == B || opcode == BL;
+ return isBranch;
+}
+
+bool Assembler::IsBz(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ;
+ return isBranch;
+}
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+ // Add GetLabelConst function?
+ uint32_t label_constant = instr & ~kImm16Mask;
+ return label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a jump.
+ return opcode == JIRL;
+}
+
+bool Assembler::IsLu12i_w(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU12I_W;
+}
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == ORI;
+}
+
+bool Assembler::IsLu32i_d(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU32I_D;
+}
+
+bool Assembler::IsLu52i_d(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == LU52I_D;
+}
+
+bool Assembler::IsMov(Instr instr, Register rd, Register rj) {
+ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
+ Instr instr1 =
+ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) {
+ DCHECK(is_int20(si20));
+ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ DCHECK_LT(type, 32);
+
+ Instr instr1 =
+ ANDI | ((type & kImm12Mask) << kRkShift) | (zero_reg.code() << kRjShift);
+
+ return instr == instr1;
+}
+
+static inline int32_t GetOffsetOfBranch(Instr instr,
+ Assembler::OffsetSize bits) {
+ int32_t result = 0;
+ if (bits == 16) {
+ result = (instr << 6) >> 16;
+ } else if (bits == 21) {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi5 = (instr << 27) >> 11;
+ result = hi5 | low16;
+ } else {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi10 = (instr << 22) >> 6;
+ result = hi10 | low16;
+ DCHECK_EQ(bits, 26);
+ }
+ return result << 2;
+}
+
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (Assembler::IsB(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBz(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ } else {
+ DCHECK(Assembler::IsBranch(instr));
+ return Assembler::OffsetSize::kOffset16;
+ }
+}
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ Assembler::OffsetSize bits = OffsetSizeInBits(instr);
+
+ int32_t imm = GetOffsetOfBranch(instr, bits);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ // Handle the case that next branch position is 0.
+ // TODO(LOONG_dev): Define -4 as a constant
+ int32_t offset = pos + imm;
+ return offset == 0 ? -4 : offset;
+ }
+}
+
+int Assembler::target_at(int pos, bool is_internal) {
+ if (is_internal) {
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
+ int64_t address = *p;
+ if (address == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int64_t instr_address = reinterpret_cast<int64_t>(p);
+ DCHECK(instr_address - address < INT_MAX);
+ int delta = static_cast<int>(instr_address - address);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
+ Instr instr = instr_at(pos);
+
+ // TODO(LOONG_dev): Remove after label_at_put is removed?
+ if ((instr & ~kImm16Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
+ }
+
+ // Check we have a branch or jump instruction.
+ DCHECK(IsBranch(instr) || IsPcAddi(instr, t8, 16));
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ if (IsBranch(instr)) {
+ return AddBranchOffset(pos, instr);
+ } else {
+ DCHECK(IsPcAddi(instr, t8, 16));
+ // See BranchLong(Label* L) and BranchAndLinkLong.
+ int32_t imm32;
+ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLu12i_w(instr_lu12i_w));
+ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12;
+ imm32 |= ((instr_ori >> 10) & static_cast<int32_t>(kImm12Mask));
+ if (imm32 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ }
+ return pos + imm32;
+ }
+}
+
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 3, 0);
+ imm >>= 2;
+
+ DCHECK(is_intn(imm, bits));
+
+ if (bits == 16) {
+ const int32_t mask = ((1 << 16) - 1) << 10;
+ instr &= ~mask;
+ return instr | ((imm << 10) & mask);
+ } else if (bits == 21) {
+ const int32_t mask = 0x3fffc1f;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi5 = (imm >> 16) & 0x1f;
+ return instr | low16 | hi5;
+ } else {
+ DCHECK_EQ(bits, 26);
+ const int32_t mask = 0x3ffffff;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi10 = (imm >> 16) & 0x3ff;
+ return instr | low16 | hi10;
+ }
+}
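As a worked example of the 16-bit case above (the 21- and 26-bit cases split the immediate across two fields in the same spirit): the word offset, i.e. the byte offset shifted right by two, is placed in instruction bits 25..10. The sketch below round-trips one made-up offset through that placement and the matching decode used by GetOffsetOfBranch; the opcode value is a placeholder, not a real LoongArch encoding.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  int32_t pos = 0x100, target_pos = 0x180;  // byte positions in the buffer
  int32_t imm = (target_pos - pos) >> 2;    // 0x20 instructions forward
  const uint32_t mask = ((1u << 16) - 1) << 10;

  uint32_t instr = 0x58000000u;             // placeholder opcode bits (31..26)
  instr = (instr & ~mask) | ((static_cast<uint32_t>(imm) << 10) & mask);

  // Decode it back the way GetOffsetOfBranch does for 16-bit offsets.
  int32_t back = (static_cast<int32_t>(instr << 6) >> 16) << 2;
  assert(back == target_pos - pos);
  std::printf("encoded offset %d bytes, decoded %d bytes\n",
              target_pos - pos, back);
  return 0;
}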
+
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
+ if (is_internal) {
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
+ return;
+ }
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code pointer of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+
+ DCHECK(IsBranch(instr));
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+}
+
+void Assembler::print(const Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l;
+ l.link_to(L->pos());
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l, is_internal_reference(&l));
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+ }
+
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ int dist = pos - fixup_pos;
+ is_internal = is_internal_reference(L);
+ next(L, is_internal); // Call next before overwriting link with target at
+ // fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ }
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) ||
+ IsPcAddi(instr, t8, 8));
+ target_at_put(fixup_pos, pos, false);
+ }
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // Label can only be bound once.
+ bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L, bool is_internal) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos(), is_internal);
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else if (link == -4) {
+ // Next position is pc_offset == 0
+ L->link_to(0);
+ } else {
+ DCHECK_GE(link, 0);
+ L->link_to(link);
+ }
+}
+
+bool Assembler::is_near_c(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize;
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ DCHECK(L->is_bound());
+ return ((pc_offset() - L->pos()) <
+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
+}
+
+bool Assembler::is_near_a(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize;
+}
+
+int Assembler::BranchOffset(Instr instr) {
+ int bits = OffsetSize::kOffset16;
+
+ uint32_t opcode = (instr >> 26) << 26;
+ switch (opcode) {
+ case B:
+ case BL:
+ bits = OffsetSize::kOffset26;
+ break;
+ case BNEZ:
+ case BEQZ:
+ case BCZ:
+ bits = OffsetSize::kOffset21;
+ break;
+ case BNE:
+ case BEQ:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ case JIRL:
+ bits = OffsetSize::kOffset16;
+ break;
+ default:
+ break;
+ }
+
+ return (1 << (bits + 2 - 1)) - 1;
+}
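A quick worked example of the value returned above, assuming kOffset16, kOffset21 and kOffset26 carry the numeric values 16, 21 and 26 (as the comparisons elsewhere in this file suggest): the encoded field counts instructions rather than bytes, so the "+ 2" converts the reach to a byte distance.

#include <cstdio>
#include <initializer_list>

int main() {
  for (int bits : {16, 21, 26}) {
    int max_bytes = (1 << (bits + 2 - 1)) - 1;  // same expression as BranchOffset()
    std::printf("%d-bit branch field: max forward reach %d bytes\n",
                bits, max_bytes);
  }
  return 0;
}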
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in LoongArch64's 16 bits of immediate-offset
+// instruction space. There is no guarantee that the relocated location can be
+// similarly encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsNone(rmode);
+}
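The policy this helper encodes can be illustrated with a small standalone sketch: an operand that carries relocation information is never folded into a short immediate field (the relocated value might not fit), so it is materialized into a scratch register first. Everything below, including the 12-bit si12 bound, is an illustrative assumption rather than V8 code.

#include <cstdint>
#include <cstdio>

enum class RelocMode { kNone, kEmbeddedObject };

// Mirrors the policy above: any relocatable operand must go through a register.
bool MustUseReg(RelocMode mode) { return mode != RelocMode::kNone; }

void AddImmediate(int64_t imm, RelocMode mode) {
  bool fits = imm >= -2048 && imm < 2048;  // e.g. a signed 12-bit si12 field
  if (!MustUseReg(mode) && fits) {
    std::printf("addi_d rd, rj, %lld\n", static_cast<long long>(imm));
  } else {
    std::printf("li scratch, %lld ; add_d rd, rj, scratch\n",
                static_cast<long long>(imm));
  }
}

int main() {
  AddImmediate(42, RelocMode::kNone);            // folds into the immediate
  AddImmediate(42, RelocMode::kEmbeddedObject);  // relocatable: use a register
  return 0;
}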
+
+void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid());
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift |
+ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(BCZ == opcode && is_int21(si21));
+ DCHECK(cj >= 0 && cj <= 7);
+ int32_t sc = (isEq ? cj : cj + 8);
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) |
+ ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, int32_t si26) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((B == opcode || BL == opcode) && is_int26(si26));
+ Instr instr =
+ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(is_int16(si16));
+ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk,
+ FPURegister fj, CFRegister cd) {
+ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D);
+ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk,
+ FPURegister fj, FPURegister rd) {
+ DCHECK((opcode == FSEL));
+ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, Register rd,
+ bool rjrd) {
+ DCHECK(rjrd);
+ Instr instr = 0;
+ instr = opcode | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) {
+ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) ||
+ (opcode == MOVGR2FRH_W));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) {
+ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) ||
+ (opcode == MOVFRH2GR_S));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) {
+ DCHECK((opcode == MOVGR2FCSR));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) {
+ DCHECK((opcode == MOVFCSR2GR));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) {
+ DCHECK((opcode == MOVFR2CF));
+ Instr instr = opcode | (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) {
+ DCHECK((opcode == MOVCF2FR));
+ Instr instr = opcode | cj << kFjShift | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) {
+ DCHECK((opcode == MOVGR2CF));
+ Instr instr = opcode | (rj.code() << kRjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) {
+ DCHECK((opcode == MOVCF2GR));
+ Instr instr = opcode | cj << kFjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ Register rd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd) {
+ DCHECK(is_uint3(bit3));
+ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd) {
+ DCHECK(is_uint6(bit6m) && is_uint6(bit6l));
+ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) {
+ // DCHECK(is_uint20(bit20) || is_int20(bit20));
+ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit15) {
+ DCHECK(is_uint15(bit15));
+ Instr instr = opcode | (bit15 & 0x7fff);
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits) {
+ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 ||
+ value_bits == 16);
+ uint32_t imm = value & 0x3f;
+ if (value_bits == 12) {
+ imm = value & kImm12Mask;
+ } else if (value_bits == 14) {
+ imm = value & 0x3fff;
+ } else if (value_bits == 16) {
+ imm = value & kImm16Mask;
+ }
+ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj,
+ FPURegister fd) {
+ DCHECK(is_int12(bit12));
+ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) |
+ (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+
+ return imm;
+}
+
+uint64_t Assembler::branch_long_offset(Label* L) {
+ int64_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t offset = target_pos - pc_offset();
+ DCHECK_EQ(offset & 3, 0);
+
+ return static_cast<uint64_t>(offset);
+}
+
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK(is_intn(offset, bits + 2));
+ DCHECK_EQ(offset & 3, 0);
+
+ return offset;
+}
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ DCHECK_EQ(imm18 & 3, 0);
+ int32_t imm16 = imm18 >> 2;
+ DCHECK(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
+ } else {
+ target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(at_offset);
+ }
+}
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int32_t offset) { GenB(B, offset); }
+
+void Assembler::bl(int32_t offset) { GenB(BL, offset); }
+
+void Assembler::beq(Register rj, Register rd, int32_t offset) {
+ GenBJ(BEQ, rj, rd, offset);
+}
+
+void Assembler::bne(Register rj, Register rd, int32_t offset) {
+ GenBJ(BNE, rj, rd, offset);
+}
+
+void Assembler::blt(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLT, rj, rd, offset);
+}
+
+void Assembler::bge(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGE, rj, rd, offset);
+}
+
+void Assembler::bltu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLTU, rj, rd, offset);
+}
+
+void Assembler::bgeu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGEU, rj, rd, offset);
+}
+
+void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); }
+void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); }
+
+void Assembler::jirl(Register rd, Register rj, int32_t offset) {
+ GenBJ(JIRL, rj, rd, offset);
+}
+
+void Assembler::bceqz(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, true);
+}
+
+void Assembler::bcnez(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, false);
+}
+
+// -------Data-processing-instructions---------
+
+// Arithmetic.
+void Assembler::add_w(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_W, rk, rj, rd);
+}
+
+void Assembler::add_d(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_D, rk, rj, rd);
+}
+
+void Assembler::sub_w(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_W, rk, rj, rd);
+}
+
+void Assembler::sub_d(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_D, rk, rj, rd);
+}
+
+void Assembler::addi_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_W, si12, rj, rd, 12);
+}
+
+void Assembler::addi_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_D, si12, rj, rd, 12);
+}
+
+void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) {
+ GenImm(ADDU16I_D, si16, rj, rd, 16);
+}
+
+void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_W, sa2 - 1, rk, rj, rd);
+}
+
+void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_WU, sa2 + 3, rk, rj, rd);
+}
+
+void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_D, sa2 - 1, rk, rj, rd);
+}
+
+void Assembler::lu12i_w(Register rd, int32_t si20) {
+ GenImm(LU12I_W, si20, rd);
+}
+
+void Assembler::lu32i_d(Register rd, int32_t si20) {
+ GenImm(LU32I_D, si20, rd);
+}
+
+void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LU52I_D, si12, rj, rd, 12);
+}
+
+void Assembler::slt(Register rd, Register rj, Register rk) {
+ GenRegister(SLT, rk, rj, rd);
+}
+
+void Assembler::sltu(Register rd, Register rj, Register rk) {
+ GenRegister(SLTU, rk, rj, rd);
+}
+
+void Assembler::slti(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTI, si12, rj, rd, 12);
+}
+
+void Assembler::sltui(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTUI, si12, rj, rd, 12);
+}
+
+void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); }
+
+void Assembler::pcaddu12i(Register rd, int32_t si20) {
+ GenImm(PCADDU12I, si20, rd);
+}
+
+void Assembler::pcaddu18i(Register rd, int32_t si20) {
+ GenImm(PCADDU18I, si20, rd);
+}
+
+void Assembler::pcalau12i(Register rd, int32_t si20) {
+ GenImm(PCALAU12I, si20, rd);
+}
+
+void Assembler::and_(Register rd, Register rj, Register rk) {
+ GenRegister(AND, rk, rj, rd);
+}
+
+void Assembler::or_(Register rd, Register rj, Register rk) {
+ GenRegister(OR, rk, rj, rd);
+}
+
+void Assembler::xor_(Register rd, Register rj, Register rk) {
+ GenRegister(XOR, rk, rj, rd);
+}
+
+void Assembler::nor(Register rd, Register rj, Register rk) {
+ GenRegister(NOR, rk, rj, rd);
+}
+
+void Assembler::andn(Register rd, Register rj, Register rk) {
+ GenRegister(ANDN, rk, rj, rd);
+}
+
+void Assembler::orn(Register rd, Register rj, Register rk) {
+ GenRegister(ORN, rk, rj, rd);
+}
+
+void Assembler::andi(Register rd, Register rj, int32_t ui12) {
+ GenImm(ANDI, ui12, rj, rd, 12);
+}
+
+void Assembler::ori(Register rd, Register rj, int32_t ui12) {
+ GenImm(ORI, ui12, rj, rd, 12);
+}
+
+void Assembler::xori(Register rd, Register rj, int32_t ui12) {
+ GenImm(XORI, ui12, rj, rd, 12);
+}
+
+void Assembler::mul_w(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_W, rk, rj, rd);
+}
+
+void Assembler::mulh_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_W, rk, rj, rd);
+}
+
+void Assembler::mulh_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_WU, rk, rj, rd);
+}
+
+void Assembler::mul_d(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_D, rk, rj, rd);
+}
+
+void Assembler::mulh_d(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_D, rk, rj, rd);
+}
+
+void Assembler::mulh_du(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_DU, rk, rj, rd);
+}
+
+void Assembler::mulw_d_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_W, rk, rj, rd);
+}
+
+void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_WU, rk, rj, rd);
+}
+
+void Assembler::div_w(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_W, rk, rj, rd);
+}
+
+void Assembler::mod_w(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_W, rk, rj, rd);
+}
+
+void Assembler::div_wu(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_WU, rk, rj, rd);
+}
+
+void Assembler::mod_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_WU, rk, rj, rd);
+}
+
+void Assembler::div_d(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_D, rk, rj, rd);
+}
+
+void Assembler::mod_d(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_D, rk, rj, rd);
+}
+
+void Assembler::div_du(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_DU, rk, rj, rd);
+}
+
+void Assembler::mod_du(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_DU, rk, rj, rd);
+}
+
+// Shifts.
+void Assembler::sll_w(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_W, rk, rj, rd);
+}
+
+void Assembler::srl_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_W, rk, rj, rd);
+}
+
+void Assembler::sra_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_W, rk, rj, rd);
+}
+
+void Assembler::rotr_w(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_W, rk, rj, rd);
+}
+
+void Assembler::slli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srai_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::sll_d(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_D, rk, rj, rd);
+}
+
+void Assembler::srl_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_D, rk, rj, rd);
+}
+
+void Assembler::sra_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_D, rk, rj, rd);
+}
+
+void Assembler::rotr_d(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_D, rk, rj, rd);
+}
+
+void Assembler::slli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SLLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srai_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRAI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(ROTRI_D, ui6, rj, rd, 6);
+}
+
+// Bit twiddling.
+void Assembler::ext_w_b(Register rd, Register rj) {
+ GenRegister(EXT_W_B, rj, rd);
+}
+
+void Assembler::ext_w_h(Register rd, Register rj) {
+ GenRegister(EXT_W_H, rj, rd);
+}
+
+void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); }
+
+void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); }
+
+void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); }
+
+void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); }
+
+void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); }
+
+void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); }
+
+void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); }
+
+void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); }
+
+void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2));
+ GenImm(BYTEPICK_W, sa2, rk, rj, rd);
+}
+
+void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) {
+ GenImm(BYTEPICK_D, sa3, rk, rj, rd);
+}
+
+void Assembler::revb_2h(Register rd, Register rj) {
+ GenRegister(REVB_2H, rj, rd);
+}
+
+void Assembler::revb_4h(Register rd, Register rj) {
+ GenRegister(REVB_4H, rj, rd);
+}
+
+void Assembler::revb_2w(Register rd, Register rj) {
+ GenRegister(REVB_2W, rj, rd);
+}
+
+void Assembler::revb_d(Register rd, Register rj) {
+ GenRegister(REVB_D, rj, rd);
+}
+
+void Assembler::revh_2w(Register rd, Register rj) {
+ GenRegister(REVH_2W, rj, rd);
+}
+
+void Assembler::revh_d(Register rd, Register rj) {
+ GenRegister(REVH_D, rj, rd);
+}
+
+void Assembler::bitrev_4b(Register rd, Register rj) {
+ GenRegister(BITREV_4B, rj, rd);
+}
+
+void Assembler::bitrev_8b(Register rd, Register rj) {
+ GenRegister(BITREV_8B, rj, rd);
+}
+
+void Assembler::bitrev_w(Register rd, Register rj) {
+ GenRegister(BITREV_W, rj, rd);
+}
+
+void Assembler::bitrev_d(Register rd, Register rj) {
+ GenRegister(BITREV_D, rj, rd);
+}
+
+void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd);
+}
+
+void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRINS_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd);
+}
+
+void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRPICK_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::maskeqz(Register rd, Register rj, Register rk) {
+ GenRegister(MASKEQZ, rk, rj, rd);
+}
+
+void Assembler::masknez(Register rd, Register rj, Register rk) {
+ GenRegister(MASKNEZ, rk, rj, rd);
+}
+
+// Memory-instructions
+void Assembler::ld_b(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_B, si12, rj, rd, 12);
+}
+
+void Assembler::ld_h(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_H, si12, rj, rd, 12);
+}
+
+void Assembler::ld_w(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_W, si12, rj, rd, 12);
+}
+
+void Assembler::ld_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_D, si12, rj, rd, 12);
+}
+
+void Assembler::ld_bu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_BU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_hu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_HU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_wu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_WU, si12, rj, rd, 12);
+}
+
+void Assembler::st_b(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_B, si12, rj, rd, 12);
+}
+
+void Assembler::st_h(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_H, si12, rj, rd, 12);
+}
+
+void Assembler::st_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_W, si12, rj, rd, 12);
+}
+
+void Assembler::st_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_D, si12, rj, rd, 12);
+}
+
+void Assembler::ldx_b(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_B, rk, rj, rd);
+}
+
+void Assembler::ldx_h(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_H, rk, rj, rd);
+}
+
+void Assembler::ldx_w(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_W, rk, rj, rd);
+}
+
+void Assembler::ldx_d(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_D, rk, rj, rd);
+}
+
+void Assembler::ldx_bu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_BU, rk, rj, rd);
+}
+
+void Assembler::ldx_hu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_HU, rk, rj, rd);
+}
+
+void Assembler::ldx_wu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_WU, rk, rj, rd);
+}
+
+void Assembler::stx_b(Register rd, Register rj, Register rk) {
+ GenRegister(STX_B, rk, rj, rd);
+}
+
+void Assembler::stx_h(Register rd, Register rj, Register rk) {
+ GenRegister(STX_H, rk, rj, rd);
+}
+
+void Assembler::stx_w(Register rd, Register rj, Register rk) {
+ GenRegister(STX_W, rk, rj, rd);
+}
+
+void Assembler::stx_d(Register rd, Register rj, Register rk) {
+ GenRegister(STX_D, rk, rj, rd);
+}
+
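+// The ldptr/stptr (and, further below, ll/sc) instructions take a 16-bit,
+// 4-byte-aligned byte offset; the low two bits are dropped and the remaining
+// 14 bits are encoded as si14.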
+void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::amswap_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_W, rk, rj, rd);
+}
+
+void Assembler::amswap_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_D, rk, rj, rd);
+}
+
+void Assembler::amadd_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_W, rk, rj, rd);
+}
+
+void Assembler::amadd_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_D, rk, rj, rd);
+}
+
+void Assembler::amand_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_W, rk, rj, rd);
+}
+
+void Assembler::amand_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_D, rk, rj, rd);
+}
+
+void Assembler::amor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_W, rk, rj, rd);
+}
+
+void Assembler::amor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_D, rk, rj, rd);
+}
+
+void Assembler::amxor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_W, rk, rj, rd);
+}
+
+void Assembler::amxor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_D, rk, rj, rd);
+}
+
+void Assembler::ammax_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_W, rk, rj, rd);
+}
+
+void Assembler::ammax_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_D, rk, rj, rd);
+}
+
+void Assembler::ammin_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_W, rk, rj, rd);
+}
+
+void Assembler::ammin_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_D, rk, rj, rd);
+}
+
+void Assembler::ammax_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DU, rk, rj, rd);
+}
+
+void Assembler::amswap_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_W, rk, rj, rd);
+}
+
+void Assembler::amswap_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_D, rk, rj, rd);
+}
+
+void Assembler::amadd_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_W, rk, rj, rd);
+}
+
+void Assembler::amadd_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_D, rk, rj, rd);
+}
+
+void Assembler::amand_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_W, rk, rj, rd);
+}
+
+void Assembler::amand_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_D, rk, rj, rd);
+}
+
+void Assembler::amor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::amxor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amxor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammax_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammin_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammin_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ll_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ll_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_D, si14 >> 2, rj, rd, 14);
+}
+
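+// Barrier instructions: dbar is a data/memory barrier and ibar an instruction
+// fetch barrier; a hint of 0 requests a full barrier.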
+void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); }
+
+void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); }
+
+// Break instruction.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
+ GenImm(BREAK, code);
+}
+
+void Assembler::stop(uint32_t code) {
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
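+  // When running natively on LOONG64 hardware there is no simulator to
+  // interpret the stop code, so a plain break suffices; otherwise the code is
+  // forwarded through break_ so the simulator can recognize the stop.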
+#if defined(V8_HOST_ARCH_LOONG64)
+ break_(0x4321);
+#else // V8_HOST_ARCH_LOONG64
+ break_(code, true);
+#endif
+}
+
+void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_S, fk, fj, fd);
+}
+
+void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_D, fk, fj, fd);
+}
+
+void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_S, fk, fj, fd);
+}
+
+void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_D, fk, fj, fd);
+}
+
+void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_S, fk, fj, fd);
+}
+
+void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_D, fk, fj, fd);
+}
+
+void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_S, fk, fj, fd);
+}
+
+void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_D, fk, fj, fd);
+}
+
+void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_S, fk, fj, fd);
+}
+
+void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_D, fk, fj, fd);
+}
+
+void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_S, fk, fj, fd);
+}
+
+void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_D, fk, fj, fd);
+}
+
+void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_S, fk, fj, fd);
+}
+
+void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_D, fk, fj, fd);
+}
+
+void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_S, fk, fj, fd);
+}
+
+void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_D, fk, fj, fd);
+}
+
+void Assembler::fabs_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_S, fj, fd);
+}
+
+void Assembler::fabs_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_D, fj, fd);
+}
+
+void Assembler::fneg_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_S, fj, fd);
+}
+
+void Assembler::fneg_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_D, fj, fd);
+}
+
+void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_S, fj, fd);
+}
+
+void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_D, fj, fd);
+}
+
+void Assembler::frecip_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_S, fj, fd);
+}
+
+void Assembler::frecip_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_D, fj, fd);
+}
+
+void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_S, fj, fd);
+}
+
+void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_D, fj, fd);
+}
+
+void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_S, fk, fj, fd);
+}
+
+void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_D, fk, fj, fd);
+}
+
+void Assembler::flogb_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_S, fj, fd);
+}
+
+void Assembler::flogb_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_D, fj, fd);
+}
+
+void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_S, fk, fj, fd);
+}
+
+void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_D, fk, fj, fd);
+}
+
+void Assembler::fclass_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_S, fj, fd);
+}
+
+void Assembler::fclass_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_D, fj, fd);
+}
+
+void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_S, cc, fk, fj, cd);
+}
+
+void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_D, cc, fk, fj, cd);
+}
+
+void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_S_D, fj, fd);
+}
+
+void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_D_S, fj, fd);
+}
+
+void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_W, fj, fd);
+}
+
+void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_L, fj, fd);
+}
+
+void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_W, fj, fd);
+}
+
+void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_L, fj, fd);
+}
+
+void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_S, fj, fd);
+}
+
+void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_D, fj, fd);
+}
+
+void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_S, fj, fd);
+}
+
+void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_D, fj, fd);
+}
+
+void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_S, fj, fd);
+}
+
+void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_D, fj, fd);
+}
+
+void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_S, fj, fd);
+}
+
+void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_D, fj, fd);
+}
+
+void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_S, fj, fd);
+}
+
+void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_D, fj, fd);
+}
+
+void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_S, fj, fd);
+}
+
+void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_D, fj, fd);
+}
+
+void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_S, fj, fd);
+}
+
+void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_D, fj, fd);
+}
+
+void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_S, fj, fd);
+}
+
+void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_D, fj, fd);
+}
+
+void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_S, fj, fd);
+}
+
+void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_D, fj, fd);
+}
+
+void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_S, fj, fd);
+}
+
+void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_D, fj, fd);
+}
+
+void Assembler::frint_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_S, fj, fd);
+}
+
+void Assembler::frint_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_D, fj, fd);
+}
+
+void Assembler::fmov_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_S, fj, fd);
+}
+
+void Assembler::fmov_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_D, fj, fd);
+}
+
+void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj,
+ FPURegister fk) {
+ GenSel(FSEL, ca, fk, fj, fd);
+}
+
+void Assembler::movgr2fr_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_W, rj, fd);
+}
+
+void Assembler::movgr2fr_d(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_D, rj, fd);
+}
+
+void Assembler::movgr2frh_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FRH_W, rj, fd);
+}
+
+void Assembler::movfr2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_S, fj, rd);
+}
+
+void Assembler::movfr2gr_d(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_D, fj, rd);
+}
+
+void Assembler::movfrh2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFRH2GR_S, fj, rd);
+}
+
+void Assembler::movgr2fcsr(Register rj, FPUControlRegister fcsr) {
+ GenRegister(MOVGR2FCSR, rj, fcsr);
+}
+
+void Assembler::movfcsr2gr(Register rd, FPUControlRegister fcsr) {
+ GenRegister(MOVFCSR2GR, fcsr, rd);
+}
+
+void Assembler::movfr2cf(CFRegister cd, FPURegister fj) {
+ GenRegister(MOVFR2CF, fj, cd);
+}
+
+void Assembler::movcf2fr(FPURegister fd, CFRegister cj) {
+ GenRegister(MOVCF2FR, cj, fd);
+}
+
+void Assembler::movgr2cf(CFRegister cd, Register rj) {
+ GenRegister(MOVGR2CF, rj, cd);
+}
+
+void Assembler::movcf2gr(Register rd, CFRegister cj) {
+ GenRegister(MOVCF2GR, cj, rd);
+}
+
+void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_S, si12, rj, fd);
+}
+
+void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_D, si12, rj, fd);
+}
+
+void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_S, si12, rj, fd);
+}
+
+void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_D, si12, rj, fd);
+}
+
+void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_S, rk, rj, fd);
+}
+
+void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_D, rk, rj, fd);
+}
+
+void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_S, rk, rj, fd);
+}
+
+void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_D, rk, rj, fd);
+}
+
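+// If the memory operand's immediate offset does not fit in a signed 12-bit
+// field, materialize the offset into a scratch register and rewrite the
+// operand into base + index form with a zero offset. For example, an offset
+// of 0x12345 becomes lu12i_w(scratch, 0x12) followed by
+// ori(scratch, scratch, 0x345).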
+void Assembler::AdjustBaseAndOffset(MemOperand* src) {
+ // is_int12 must be passed a signed value, hence the static cast below.
+ if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) {
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (is_uint12(static_cast<int32_t>(src->offset()))) {
+ ori(scratch, zero_reg, src->offset() & kImm12Mask);
+ } else {
+ lu12i_w(scratch, src->offset() >> 12 & 0xfffff);
+ if (src->offset() & kImm12Mask) {
+ ori(scratch, scratch, src->offset() & kImm12Mask);
+ }
+ }
+ src->index_ = scratch;
+ src->offset_ = 0;
+}
+
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsInternalReference(rmode));
+ int64_t* p = reinterpret_cast<int64_t*>(pc);
+ if (*p == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ *p += pc_delta;
+ return 2; // Number of instructions patched.
+}
+
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ Instr instr = instr_at(pc);
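+  // For the B/BL format the 26-bit instruction offset is split across the
+  // encoding: offs[15:0] sits in instr[25:10] and offs[25:16] in instr[9:0].
+  // Reassemble it, sign-extend, and scale by 4 to get the byte offset.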
+ int32_t offset = instr & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask)) << 2;
+ offset -= pc_delta;
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ offset >>= 2;
+ offset = ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ return;
+}
+
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
+void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
+ // Compute new buffer size.
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+
+  // Some internal data structures overflow for very large buffers, so
+  // kMaximalBufferSize must be kept small enough to avoid that.
+ if (new_size > kMaximalBufferSize) {
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
+
+ // Copy the data.
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
+
+ // Switch buffers.
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
+ pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
+
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ if (internal_ref != kEndOfJumpChain) {
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
+ }
+ }
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+}
+
+void Assembler::db(uint8_t data) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dd(Label* label) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ uint64_t data;
+ if (label->is_bound()) {
+ data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
+ } else {
+ data = jump_address(label);
+ unbound_labels_count_++;
+ internal_reference_positions_.insert(label->pos());
+ }
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ CheckTrampolinePoolQuick(instructions);
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ DCHECK(!trampoline_emitted_);
+ DCHECK_GE(unbound_labels_count_, 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ {
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+ }
+ }
+ nop();
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit trampoline once, we need to prevent any
+ // further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ =
+ pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
+
+Address Assembler::target_address_at(Address pc) {
+ Instr instr0 = instr_at(pc);
+ if (IsB(instr0)) {
+ int32_t offset = instr0 & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask))
+ << 2;
+ return pc + offset;
+ }
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ Instr instr2 = instr_at(pc + 2 * kInstrSize);
+
+  // Interpret the 3 instructions generated by li: see the listing in
+  // Assembler::set_target_value_at() just below.
+ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));
+
+ // Assemble the 48 bit value.
+ uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32;
+ uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12;
+ uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff);
+ int64_t addr = static_cast<int64_t>(hi20 | mid20 | low12);
+
+ // Sign extend to get canonical address.
+ addr = (addr << 16) >> 16;
+ return static_cast<Address>(addr);
+}
+
+// On loong64, a target address is stored in a 3-instruction sequence:
+// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
+// 1: ori(rd, rd, j.imm64_ & kImm12Mask);
+// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask);
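+//   e.g. for target 0x00007f12'3456'7abc this emits:
+//     lu12i_w rd, 0x34567   // bits [31:12]
+//     ori     rd, rd, 0xabc // bits [11:0]
+//     lu32i_d rd, 0x07f12   // bits [51:32]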
+//
+// Patching the address must replace all three instructions (lu12i_w, ori and
+// lu32i_d) and flush the i-cache.
+//
+void Assembler::set_target_value_at(Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+  // There is an optimization where only 3 instructions are used to load an
+  // address on LOONG64, because only 48 bits of the address are effectively
+  // used. It relies on the fact that the upper bits [63:48] are not used for
+  // virtual address translation and have to be set according to the value of
+  // bit 47 in order to get a canonical address.
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + kInstrSize);
+ Instr instr2 = instr_at(pc + kInstrSize * 2);
+ DCHECK(IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2) ||
+ IsB(instr0));
+#endif
+
+ Instr instr = instr_at(pc);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ if (IsB(instr)) {
+ int32_t offset = (target - pc) >> 2;
+ CHECK(is_int26(offset));
+ offset =
+ ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, kInstrSize);
+ }
+ return;
+ }
+ uint32_t rd_code = GetRd(instr);
+
+  // Must use 3 instructions to ensure patchable code.
+ // lu12i_w rd, middle-20.
+ // ori rd, rd, low-12.
+ // lu32i_d rd, high-20.
+ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
+ *(p + 1) =
+ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
+ *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code;
+
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, 3 * kInstrSize);
+ }
+}
+
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
+ *available_ &= ~(1UL << index);
+
+ return Register::from_code(index);
+}
+
+bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/chromium/v8/src/codegen/loong64/assembler-loong64.h b/chromium/v8/src/codegen/loong64/assembler-loong64.h
new file mode 100644
index 00000000000..b886b2ef43f
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/assembler-loong64.h
@@ -0,0 +1,1129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <set>
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/register-loong64.h"
+#include "src/codegen/machine-type.h"
+#include "src/objects/contexts.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointTableBuilder;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand {
+ public:
+ // Immediate.
+ V8_INLINE explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = static_cast<int64_t>(f.address());
+ }
+ V8_INLINE explicit Operand(const char* s);
+ explicit Operand(Handle<HeapObject> handle);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
+ }
+
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
+
+ // Register.
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
+
+ // Return true if this is a register operand.
+ V8_INLINE bool is_reg() const;
+
+ inline int64_t immediate() const;
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ Register rm() const { return rm_; }
+
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ Register rm_;
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int64_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+// Class MemOperand represents a memory operand in load and store instructions.
+// 1: base_reg + off_imm( si12 | si14<<2)
+// 2: base_reg + offset_reg
+class V8_EXPORT_PRIVATE MemOperand {
+ public:
+ explicit MemOperand(Register rj, int32_t offset = 0);
+ explicit MemOperand(Register rj, Register offset = no_reg);
+ Register base() const { return base_; }
+ Register index() const { return index_; }
+ int32_t offset() const { return offset_; }
+
+ bool hasIndexReg() const { return index_ != no_reg; }
+
+ private:
+ Register base_; // base
+ Register index_; // index
+ int32_t offset_; // offset
+
+ friend class Assembler;
+};
+
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
+ virtual ~Assembler() {}
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fallback from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
+  // Loong64 uses BlockTrampolinePool to prevent a trampoline from being
+  // generated inside a continuous instruction block. The destructor of
+  // BlockTrampolinePool must check whether a trampoline needs to be emitted
+  // immediately; otherwise the branch range would exceed the maximum branch
+  // offset. That means the pc_offset right after a call to
+  // CheckTrampolinePool may not be the Call instruction's location, so
+  // last_call_pc_ is used for the safepoint record.
+ int pc_offset_for_safepoint() {
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
+ // TODO(LOONG_dev): LOONG64 Check this comment
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
+  // Determines whether the Label is bound and near enough that a branch
+  // instruction can be used to reach it, instead of a jump instruction.
+  // c means conditional branch, a means always (unconditional) branch.
+ bool is_near_c(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_a(Label* L);
+
+ int BranchOffset(Instr instr);
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
+ }
+ uint64_t jump_address(Label* L);
+ uint64_t jump_offset(Label* L);
+ uint64_t branch_long_offset(Label* L);
+
+  // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ // The isolate argument is unused (and may be nullptr) when skipping flushing.
+ static Address target_address_at(Address pc);
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_value_at(pc, target, icache_flush_mode);
+ }
+ // On LOONG64 there is no Constant Pool so we skip that parameter.
+ V8_INLINE static Address target_address_at(Address pc,
+ Address constant_pool) {
+ return target_address_at(pc);
+ }
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+
+ static void set_target_value_at(
+ Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ static void JumpLabelToJumpRegister(Address pc);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target);
+
+ // Get the size of the special target encoded at 'instruction_payload'.
+ inline static int deserialization_special_target_size(
+ Address instruction_payload);
+
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+  // Here we are patching the address in the lu12i_w/ori/lu32i_d sequence.
+  // These values are used in the serialization process and must be zero for
+  // the LOONG platform, as Code, Embedded Object or External-reference
+  // pointers are split across consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forwards in
+  // memory after a target is resolved and written.
+ static constexpr int kSpecialTargetSize = 0;
+
+  // Number of consecutive instructions used to store a 32-bit/64-bit constant.
+  // This constant is used in RelocInfo::target_address_address() to tell the
+  // serializer the address of the instruction that follows the constant-load
+  // sequence.
+  // TODO(LOONG_dev): check this
+ static constexpr int kInstructionsFor64BitConstant = 4;
+
+ // Max offset for instructions with 16-bit offset field
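+  // (the 16-bit offset is in units of instructions, i.e. 4 bytes, so the
+  // reachable byte range spans 18 bits)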
+ static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for instructions with 21-bit offset field
+ static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1;
+
+ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ };
+
+  // Type == 0 is the default non-marking nop. For LoongArch this is an
+ // andi(zero_reg, zero_reg, 0).
+ void nop(unsigned int type = 0) {
+ DCHECK_LT(type, 32);
+ andi(zero_reg, zero_reg, type);
+ }
+
+ // --------Branch-and-jump-instructions----------
+ // We don't use likely variant of instructions.
+ void b(int32_t offset);
+ inline void b(Label* L) { b(shifted_branch_offset26(L)); }
+ void bl(int32_t offset);
+ inline void bl(Label* L) { bl(shifted_branch_offset26(L)); }
+
+ void beq(Register rj, Register rd, int32_t offset);
+ inline void beq(Register rj, Register rd, Label* L) {
+ beq(rj, rd, shifted_branch_offset(L));
+ }
+ void bne(Register rj, Register rd, int32_t offset);
+ inline void bne(Register rj, Register rd, Label* L) {
+ bne(rj, rd, shifted_branch_offset(L));
+ }
+ void blt(Register rj, Register rd, int32_t offset);
+ inline void blt(Register rj, Register rd, Label* L) {
+ blt(rj, rd, shifted_branch_offset(L));
+ }
+ void bge(Register rj, Register rd, int32_t offset);
+ inline void bge(Register rj, Register rd, Label* L) {
+ bge(rj, rd, shifted_branch_offset(L));
+ }
+ void bltu(Register rj, Register rd, int32_t offset);
+ inline void bltu(Register rj, Register rd, Label* L) {
+ bltu(rj, rd, shifted_branch_offset(L));
+ }
+ void bgeu(Register rj, Register rd, int32_t offset);
+ inline void bgeu(Register rj, Register rd, Label* L) {
+ bgeu(rj, rd, shifted_branch_offset(L));
+ }
+ void beqz(Register rj, int32_t offset);
+ inline void beqz(Register rj, Label* L) {
+ beqz(rj, shifted_branch_offset21(L));
+ }
+ void bnez(Register rj, int32_t offset);
+ inline void bnez(Register rj, Label* L) {
+ bnez(rj, shifted_branch_offset21(L));
+ }
+
+ void jirl(Register rd, Register rj, int32_t offset);
+
+ void bceqz(CFRegister cj, int32_t si21);
+ inline void bceqz(CFRegister cj, Label* L) {
+ bceqz(cj, shifted_branch_offset21(L));
+ }
+ void bcnez(CFRegister cj, int32_t si21);
+ inline void bcnez(CFRegister cj, Label* L) {
+ bcnez(cj, shifted_branch_offset21(L));
+ }
+
+ // -------Data-processing-instructions---------
+
+ // Arithmetic.
+ void add_w(Register rd, Register rj, Register rk);
+ void add_d(Register rd, Register rj, Register rk);
+ void sub_w(Register rd, Register rj, Register rk);
+ void sub_d(Register rd, Register rj, Register rk);
+
+ void addi_w(Register rd, Register rj, int32_t si12);
+ void addi_d(Register rd, Register rj, int32_t si12);
+
+ void addu16i_d(Register rd, Register rj, int32_t si16);
+
+ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2);
+
+ void lu12i_w(Register rd, int32_t si20);
+ void lu32i_d(Register rd, int32_t si20);
+ void lu52i_d(Register rd, Register rj, int32_t si12);
+
+ void slt(Register rd, Register rj, Register rk);
+ void sltu(Register rd, Register rj, Register rk);
+ void slti(Register rd, Register rj, int32_t si12);
+ void sltui(Register rd, Register rj, int32_t si12);
+
+ void pcaddi(Register rd, int32_t si20);
+ void pcaddu12i(Register rd, int32_t si20);
+ void pcaddu18i(Register rd, int32_t si20);
+ void pcalau12i(Register rd, int32_t si20);
+
+ void and_(Register rd, Register rj, Register rk);
+ void or_(Register rd, Register rj, Register rk);
+ void xor_(Register rd, Register rj, Register rk);
+ void nor(Register rd, Register rj, Register rk);
+ void andn(Register rd, Register rj, Register rk);
+ void orn(Register rd, Register rj, Register rk);
+
+ void andi(Register rd, Register rj, int32_t ui12);
+ void ori(Register rd, Register rj, int32_t ui12);
+ void xori(Register rd, Register rj, int32_t ui12);
+
+ void mul_w(Register rd, Register rj, Register rk);
+ void mulh_w(Register rd, Register rj, Register rk);
+ void mulh_wu(Register rd, Register rj, Register rk);
+ void mul_d(Register rd, Register rj, Register rk);
+ void mulh_d(Register rd, Register rj, Register rk);
+ void mulh_du(Register rd, Register rj, Register rk);
+
+ void mulw_d_w(Register rd, Register rj, Register rk);
+ void mulw_d_wu(Register rd, Register rj, Register rk);
+
+ void div_w(Register rd, Register rj, Register rk);
+ void mod_w(Register rd, Register rj, Register rk);
+ void div_wu(Register rd, Register rj, Register rk);
+ void mod_wu(Register rd, Register rj, Register rk);
+ void div_d(Register rd, Register rj, Register rk);
+ void mod_d(Register rd, Register rj, Register rk);
+ void div_du(Register rd, Register rj, Register rk);
+ void mod_du(Register rd, Register rj, Register rk);
+
+ // Shifts.
+ void sll_w(Register rd, Register rj, Register rk);
+ void srl_w(Register rd, Register rj, Register rk);
+ void sra_w(Register rd, Register rj, Register rk);
+ void rotr_w(Register rd, Register rj, Register rk);
+
+ void slli_w(Register rd, Register rj, int32_t ui5);
+ void srli_w(Register rd, Register rj, int32_t ui5);
+ void srai_w(Register rd, Register rj, int32_t ui5);
+ void rotri_w(Register rd, Register rj, int32_t ui5);
+
+ void sll_d(Register rd, Register rj, Register rk);
+ void srl_d(Register rd, Register rj, Register rk);
+ void sra_d(Register rd, Register rj, Register rk);
+ void rotr_d(Register rd, Register rj, Register rk);
+
+ void slli_d(Register rd, Register rj, int32_t ui6);
+ void srli_d(Register rd, Register rj, int32_t ui6);
+ void srai_d(Register rd, Register rj, int32_t ui6);
+ void rotri_d(Register rd, Register rj, int32_t ui6);
+
+ // Bit twiddling.
+ void ext_w_b(Register rd, Register rj);
+ void ext_w_h(Register rd, Register rj);
+
+ void clo_w(Register rd, Register rj);
+ void clz_w(Register rd, Register rj);
+ void cto_w(Register rd, Register rj);
+ void ctz_w(Register rd, Register rj);
+ void clo_d(Register rd, Register rj);
+ void clz_d(Register rd, Register rj);
+ void cto_d(Register rd, Register rj);
+ void ctz_d(Register rd, Register rj);
+
+ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3);
+
+ void revb_2h(Register rd, Register rj);
+ void revb_4h(Register rd, Register rj);
+ void revb_2w(Register rd, Register rj);
+ void revb_d(Register rd, Register rj);
+
+ void revh_2w(Register rd, Register rj);
+ void revh_d(Register rd, Register rj);
+
+ void bitrev_4b(Register rd, Register rj);
+ void bitrev_8b(Register rd, Register rj);
+
+ void bitrev_w(Register rd, Register rj);
+ void bitrev_d(Register rd, Register rj);
+
+ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void maskeqz(Register rd, Register rj, Register rk);
+ void masknez(Register rd, Register rj, Register rk);
+
+ // Memory-instructions
+ void ld_b(Register rd, Register rj, int32_t si12);
+ void ld_h(Register rd, Register rj, int32_t si12);
+ void ld_w(Register rd, Register rj, int32_t si12);
+ void ld_d(Register rd, Register rj, int32_t si12);
+ void ld_bu(Register rd, Register rj, int32_t si12);
+ void ld_hu(Register rd, Register rj, int32_t si12);
+ void ld_wu(Register rd, Register rj, int32_t si12);
+ void st_b(Register rd, Register rj, int32_t si12);
+ void st_h(Register rd, Register rj, int32_t si12);
+ void st_w(Register rd, Register rj, int32_t si12);
+ void st_d(Register rd, Register rj, int32_t si12);
+
+ void ldx_b(Register rd, Register rj, Register rk);
+ void ldx_h(Register rd, Register rj, Register rk);
+ void ldx_w(Register rd, Register rj, Register rk);
+ void ldx_d(Register rd, Register rj, Register rk);
+ void ldx_bu(Register rd, Register rj, Register rk);
+ void ldx_hu(Register rd, Register rj, Register rk);
+ void ldx_wu(Register rd, Register rj, Register rk);
+ void stx_b(Register rd, Register rj, Register rk);
+ void stx_h(Register rd, Register rj, Register rk);
+ void stx_w(Register rd, Register rj, Register rk);
+ void stx_d(Register rd, Register rj, Register rk);
+
+ void ldptr_w(Register rd, Register rj, int32_t si14);
+ void ldptr_d(Register rd, Register rj, int32_t si14);
+ void stptr_w(Register rd, Register rj, int32_t si14);
+ void stptr_d(Register rd, Register rj, int32_t si14);
+
+ void amswap_w(Register rd, Register rk, Register rj);
+ void amswap_d(Register rd, Register rk, Register rj);
+ void amadd_w(Register rd, Register rk, Register rj);
+ void amadd_d(Register rd, Register rk, Register rj);
+ void amand_w(Register rd, Register rk, Register rj);
+ void amand_d(Register rd, Register rk, Register rj);
+ void amor_w(Register rd, Register rk, Register rj);
+ void amor_d(Register rd, Register rk, Register rj);
+ void amxor_w(Register rd, Register rk, Register rj);
+ void amxor_d(Register rd, Register rk, Register rj);
+ void ammax_w(Register rd, Register rk, Register rj);
+ void ammax_d(Register rd, Register rk, Register rj);
+ void ammin_w(Register rd, Register rk, Register rj);
+ void ammin_d(Register rd, Register rk, Register rj);
+ void ammax_wu(Register rd, Register rk, Register rj);
+ void ammax_du(Register rd, Register rk, Register rj);
+ void ammin_wu(Register rd, Register rk, Register rj);
+ void ammin_du(Register rd, Register rk, Register rj);
+
+ void amswap_db_w(Register rd, Register rk, Register rj);
+ void amswap_db_d(Register rd, Register rk, Register rj);
+ void amadd_db_w(Register rd, Register rk, Register rj);
+ void amadd_db_d(Register rd, Register rk, Register rj);
+ void amand_db_w(Register rd, Register rk, Register rj);
+ void amand_db_d(Register rd, Register rk, Register rj);
+ void amor_db_w(Register rd, Register rk, Register rj);
+ void amor_db_d(Register rd, Register rk, Register rj);
+ void amxor_db_w(Register rd, Register rk, Register rj);
+ void amxor_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_w(Register rd, Register rk, Register rj);
+ void ammax_db_d(Register rd, Register rk, Register rj);
+ void ammin_db_w(Register rd, Register rk, Register rj);
+ void ammin_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_wu(Register rd, Register rk, Register rj);
+ void ammax_db_du(Register rd, Register rk, Register rj);
+ void ammin_db_wu(Register rd, Register rk, Register rj);
+ void ammin_db_du(Register rd, Register rk, Register rj);
+
+ void ll_w(Register rd, Register rj, int32_t si14);
+ void ll_d(Register rd, Register rj, int32_t si14);
+ void sc_w(Register rd, Register rj, int32_t si14);
+ void sc_d(Register rd, Register rj, int32_t si14);
+
+ void dbar(int32_t hint);
+ void ibar(int32_t hint);
+
+ // Break instruction
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(uint32_t code = kMaxStopCode);
+
+ // Arithmetic.
+ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+
+ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fabs_s(FPURegister fd, FPURegister fj);
+ void fabs_d(FPURegister fd, FPURegister fj);
+ void fneg_s(FPURegister fd, FPURegister fj);
+ void fneg_d(FPURegister fd, FPURegister fj);
+
+ void fsqrt_s(FPURegister fd, FPURegister fj);
+ void fsqrt_d(FPURegister fd, FPURegister fj);
+ void frecip_s(FPURegister fd, FPURegister fj);
+ void frecip_d(FPURegister fd, FPURegister fj);
+ void frsqrt_s(FPURegister fd, FPURegister fj);
+ void frsqrt_d(FPURegister fd, FPURegister fj);
+
+ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void flogb_s(FPURegister fd, FPURegister fj);
+ void flogb_d(FPURegister fd, FPURegister fj);
+ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fclass_s(FPURegister fd, FPURegister fj);
+ void fclass_d(FPURegister fd, FPURegister fj);
+
+ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+
+ void fcvt_s_d(FPURegister fd, FPURegister fj);
+ void fcvt_d_s(FPURegister fd, FPURegister fj);
+
+ void ffint_s_w(FPURegister fd, FPURegister fj);
+ void ffint_s_l(FPURegister fd, FPURegister fj);
+ void ffint_d_w(FPURegister fd, FPURegister fj);
+ void ffint_d_l(FPURegister fd, FPURegister fj);
+ void ftint_w_s(FPURegister fd, FPURegister fj);
+ void ftint_w_d(FPURegister fd, FPURegister fj);
+ void ftint_l_s(FPURegister fd, FPURegister fj);
+ void ftint_l_d(FPURegister fd, FPURegister fj);
+
+ void ftintrm_w_s(FPURegister fd, FPURegister fj);
+ void ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void ftintrm_l_s(FPURegister fd, FPURegister fj);
+ void ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void ftintrp_w_s(FPURegister fd, FPURegister fj);
+ void ftintrp_w_d(FPURegister fd, FPURegister fj);
+ void ftintrp_l_s(FPURegister fd, FPURegister fj);
+ void ftintrp_l_d(FPURegister fd, FPURegister fj);
+ void ftintrz_w_s(FPURegister fd, FPURegister fj);
+ void ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void ftintrz_l_s(FPURegister fd, FPURegister fj);
+ void ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void ftintrne_w_s(FPURegister fd, FPURegister fj);
+ void ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void ftintrne_l_s(FPURegister fd, FPURegister fj);
+ void ftintrne_l_d(FPURegister fd, FPURegister fj);
+
+ void frint_s(FPURegister fd, FPURegister fj);
+ void frint_d(FPURegister fd, FPURegister fj);
+
+ void fmov_s(FPURegister fd, FPURegister fj);
+ void fmov_d(FPURegister fd, FPURegister fj);
+
+ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void movgr2fr_w(FPURegister fd, Register rj);
+ void movgr2fr_d(FPURegister fd, Register rj);
+ void movgr2frh_w(FPURegister fd, Register rj);
+
+ void movfr2gr_s(Register rd, FPURegister fj);
+ void movfr2gr_d(Register rd, FPURegister fj);
+ void movfrh2gr_s(Register rd, FPURegister fj);
+
+ void movgr2fcsr(Register rj, FPUControlRegister fcsr = FCSR0);
+ void movfcsr2gr(Register rd, FPUControlRegister fcsr = FCSR0);
+
+ void movfr2cf(CFRegister cd, FPURegister fj);
+ void movcf2fr(FPURegister fd, CFRegister cj);
+
+ void movgr2cf(CFRegister cd, Register rj);
+ void movcf2gr(Register rd, CFRegister cj);
+
+ void fld_s(FPURegister fd, Register rj, int32_t si12);
+ void fld_d(FPURegister fd, Register rj, int32_t si12);
+ void fst_s(FPURegister fd, Register rj, int32_t si12);
+ void fst_d(FPURegister fd, Register rj, int32_t si12);
+
+ void fldx_s(FPURegister fd, Register rj, Register rk);
+ void fldx_d(FPURegister fd, Register rj, Register rk);
+ void fstx_s(FPURegister fd, Register rj, Register rk);
+ void fstx_d(FPURegister fd, Register rj, Register rk);
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+  // Scope class that postpones trampoline pool generation while it is live.
+ class V8_NODISCARD BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
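+  // Illustrative sketch (not part of the upstream patch): such a scope is
+  // typically used to keep a fixed-length instruction sequence free of
+  // trampoline pools, roughly
+  //
+  //   {
+  //     BlockTrampolinePoolScope block_trampoline_pool(this);
+  //     // ... emit a sequence whose length must not change, e.g. a couple
+  //     // of nop()s that will be patched later ...
+  //   }
+  //
+  // (nop() here stands for whichever emitters the surrounding code uses.)
+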
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class V8_NODISCARD BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ dq(data, rmode);
+ }
+ void dd(Label* label);
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline intptr_t available_space() const {
+ return reloc_info_writer.pos() - pc_;
+ }
+
+ // Read/patch instructions.
+ static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(Address pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsB(Instr instr);
+ static bool IsBz(Instr instr);
+ static bool IsNal(Instr instr);
+
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsMov(Instr instr, Register rd, Register rs);
+ static bool IsPcAddi(Instr instr, Register rd, int32_t si20);
+
+ static bool IsJ(Instr instr);
+ static bool IsLu12i_w(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsLu32i_d(Instr instr);
+ static bool IsLu52i_d(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+
+ static Register GetRjReg(Instr instr);
+ static Register GetRkReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRj(Instr instr);
+ static uint32_t GetRjField(Instr instr);
+ static uint32_t GetRk(Instr instr);
+ static uint32_t GetRkField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa2(Instr instr);
+ static uint32_t GetSa3(Instr instr);
+ static uint32_t GetSa2Field(Instr instr);
+ static uint32_t GetSa3Field(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+ static bool IsEmittedConstant(Instr instr);
+
+ void CheckTrampolinePool();
+
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
+ protected:
+ // Helper function for memory load/store.
+ void AdjustBaseAndOffset(MemOperand* src);
+
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos.
+ int target_at(int pos, bool is_internal);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int pos, int target_pos, bool is_internal);
+
+ // Say if we need to relocate with this mode.
+ bool MustUseReg(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const { return internal_trampoline_exception_; }
+
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ DCHECK(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ DCHECK(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
+
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
+ private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static constexpr int kBufferCheckInterval = 1 * KB / 2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static constexpr int kGap = 64;
+ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static constexpr int kCheckConstIntervalInst = 32;
+ static constexpr int kCheckConstInterval =
+ kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void emit(uint64_t x);
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x);
+
+ void GenB(Opcode opcode, Register rj, int32_t si21); // opcode:6
+ void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq);
+ void GenB(Opcode opcode, int32_t si26);
+ void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16);
+ void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj,
+ CFRegister cd);
+ void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj,
+ FPURegister rd);
+
+ void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true);
+ void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, FPURegister fd);
+ void GenRegister(Opcode opcode, FPURegister fj, Register rd);
+ void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd);
+ void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, Register rd);
+
+ void GenRegister(Opcode opcode, Register rk, Register rj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd);
+
+ void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd);
+
+ void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit20, Register rd);
+ void GenImm(Opcode opcode, int32_t bit15);
+ void GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits); // 6 | 12 | 14 | 16
+ void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd);
+
+ // Labels.
+ void print(const Label* L);
+ void bind_to(Label* L, int pos);
+ void next(Label* L, bool is_internal);
+
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+  // Space for trampoline slots precedes space for labels. Each label occupies
+  // one instruction's worth of space, so the total amount of space for labels
+  // is equal to label_count * kInstrSize.
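+  // For example, with kInstrSize == 4 a pool created for 16 slots reserves
+  // 16 * 2 * 4 = 128 bytes of slot space (an illustrative figure, not a
+  // value taken from this patch).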
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() { return start_; }
+ int end() { return end_; }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ DCHECK(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+  // After the trampoline is emitted, long branches are used in generated code
+  // for forward branches whose target offsets could be beyond the reach of a
+  // branch instruction. We use this information to switch to a different mode
+  // of branch instruction generation, where jump instructions are used rather
+  // than regular branch instructions.
+ bool trampoline_emitted_;
+ static constexpr int kInvalidSlotPos = -1;
+
+  // Internal reference positions, required for unbound internal reference
+  // labels.
+ std::set<int64_t> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
+
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+  // Keep track of the last Call's position to ensure that the safepoint can
+  // get the correct information even if there is a trampoline immediately
+  // after the Call.
+ byte* last_call_pc_;
+
+ RegList scratch_register_list_;
+
+ private:
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ int WriteCodeComments();
+
+ friend class RegExpMacroAssemblerLOONG64;
+ friend class RelocInfo;
+ friend class BlockTrampolinePoolScope;
+ friend class EnsureSpace;
+};
+
+class EnsureSpace {
+ public:
+ explicit inline EnsureSpace(Assembler* assembler);
+};
+
+class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+ bool hasAvailable() const;
+
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
+
+ private:
+ RegList* available_;
+ RegList old_available_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
diff --git a/chromium/v8/src/codegen/loong64/constants-loong64.cc b/chromium/v8/src/codegen/loong64/constants-loong64.cc
new file mode 100644
index 00000000000..3f887a50fe8
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/constants-loong64.cc
@@ -0,0 +1,100 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/loong64/constants-loong64.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x_reg",
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
+
+// List of alias names which can be used when referring to registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"}, {30, "cp"}, {kInvalidRegister, nullptr}};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+  // No register with the requested name found.
+ return kInvalidRegister;
+}
+
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+
+// List of alias names which can be used when referring to FPU registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* FPURegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int FPURegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+  // No FPU register with the requested name found.
+ return kInvalidFPURegister;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/chromium/v8/src/codegen/loong64/constants-loong64.h b/chromium/v8/src/codegen/loong64/constants-loong64.h
new file mode 100644
index 00000000000..394c5dc6abf
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/constants-loong64.h
@@ -0,0 +1,1291 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+#define V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate LOONG64 instructions.
+
+namespace v8 {
+namespace internal {
+
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of registers with pc.
+const int kNumSimuRegisters = 33;
+
+// In the simulator, the PC register is simulated as the 33rd register.
+const int kPCRegister = 32;
+
+// Number of floating point registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// FPU control registers.
+const int kFCSRRegister = 0;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
+
+// FCSR constants.
+const uint32_t kFCSRInexactCauseBit = 24;
+const uint32_t kFCSRUnderflowCauseBit = 25;
+const uint32_t kFCSROverflowCauseBit = 26;
+const uint32_t kFCSRDivideByZeroCauseBit = 27;
+const uint32_t kFCSRInvalidOpCauseBit = 28;
+
+const uint32_t kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit;
+const uint32_t kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit;
+const uint32_t kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit;
+const uint32_t kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit;
+const uint32_t kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit;
+
+const uint32_t kFCSRCauseMask =
+ kFCSRInexactCauseMask | kFCSRUnderflowCauseMask | kFCSROverflowCauseMask |
+ kFCSRDivideByZeroCauseMask | kFCSRInvalidOpCauseMask;
+
+const uint32_t kFCSRExceptionCauseMask = kFCSRCauseMask ^ kFCSRInexactCauseMask;
+
+// The actual value of the root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ static const int64_t kMaxValue = 0x7fffffffffffffffl;
+ static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumFPURegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On LoongArch all instructions are 32 bits.
+using Instr = int32_t;
+
+// Special Software Interrupt codes when used in the presence of the LOONG64
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0x7fff
+};
+
+// On LOONG64 Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
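+// For instance (assuming the boundaries are handled as on the other ports), a
+// code of 10 would act as a simulator watchpoint, a code of 100 as a stop(),
+// and a code of 200 as a plain break that drops into the debugger.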
+
+// ----- Fields offset and length.
+const int kRjShift = 5;
+const int kRjBits = 5;
+const int kRkShift = 10;
+const int kRkBits = 5;
+const int kRdShift = 0;
+const int kRdBits = 5;
+const int kSaShift = 15;
+const int kSa2Bits = 2;
+const int kSa3Bits = 3;
+const int kCdShift = 0;
+const int kCdBits = 3;
+const int kCjShift = 5;
+const int kCjBits = 3;
+const int kCodeShift = 0;
+const int kCodeBits = 15;
+const int kCondShift = 15;
+const int kCondBits = 5;
+const int kUi5Shift = 10;
+const int kUi5Bits = 5;
+const int kUi6Shift = 10;
+const int kUi6Bits = 6;
+const int kUi12Shift = 10;
+const int kUi12Bits = 12;
+const int kSi12Shift = 10;
+const int kSi12Bits = 12;
+const int kSi14Shift = 10;
+const int kSi14Bits = 14;
+const int kSi16Shift = 10;
+const int kSi16Bits = 16;
+const int kSi20Shift = 5;
+const int kSi20Bits = 20;
+const int kMsbwShift = 16;
+const int kMsbwBits = 5;
+const int kLsbwShift = 10;
+const int kLsbwBits = 5;
+const int kMsbdShift = 16;
+const int kMsbdBits = 6;
+const int kLsbdShift = 10;
+const int kLsbdBits = 6;
+const int kFdShift = 0;
+const int kFdBits = 5;
+const int kFjShift = 5;
+const int kFjBits = 5;
+const int kFkShift = 10;
+const int kFkBits = 5;
+const int kFaShift = 15;
+const int kFaBits = 5;
+const int kCaShift = 15;
+const int kCaBits = 3;
+const int kHint15Shift = 0;
+const int kHint15Bits = 15;
+const int kHint5Shift = 0;
+const int kHint5Bits = 5;
+const int kOffsLowShift = 10;
+const int kOffsLowBits = 16;
+const int kOffs26HighShift = 0;
+const int kOffs26HighBits = 10;
+const int kOffs21HighShift = 0;
+const int kOffs21HighBits = 5;
+const int kImm12Shift = 0;
+const int kImm12Bits = 12;
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift;
+const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift;
+const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift;
+// Misc masks.
+const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values
+const int kLoMaskOf32 = 0xffff;
+const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values
+const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48;
+const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32;
+const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16;
+
+const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+
+// ----- LOONG64 Opcodes and Function Fields.
+enum Opcode : uint32_t {
+ BEQZ = 0x10U << 26,
+ BNEZ = 0x11U << 26,
+ BCZ = 0x12U << 26, // BCEQZ & BCNEZ
+ JIRL = 0x13U << 26,
+ B = 0x14U << 26,
+ BL = 0x15U << 26,
+ BEQ = 0x16U << 26,
+ BNE = 0x17U << 26,
+ BLT = 0x18U << 26,
+ BGE = 0x19U << 26,
+ BLTU = 0x1aU << 26,
+ BGEU = 0x1bU << 26,
+
+ ADDU16I_D = 0x4U << 26,
+
+ LU12I_W = 0xaU << 25,
+ LU32I_D = 0xbU << 25,
+ PCADDI = 0xcU << 25,
+ PCALAU12I = 0xdU << 25,
+ PCADDU12I = 0xeU << 25,
+ PCADDU18I = 0xfU << 25,
+
+ LL_W = 0x20U << 24,
+ SC_W = 0x21U << 24,
+ LL_D = 0x22U << 24,
+ SC_D = 0x23U << 24,
+ LDPTR_W = 0x24U << 24,
+ STPTR_W = 0x25U << 24,
+ LDPTR_D = 0x26U << 24,
+ STPTR_D = 0x27U << 24,
+
+ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W
+ BSTRINS_W = BSTR_W,
+ BSTRPICK_W = BSTR_W,
+ BSTRINS_D = 0x2U << 22,
+ BSTRPICK_D = 0x3U << 22,
+
+ SLTI = 0x8U << 22,
+ SLTUI = 0x9U << 22,
+ ADDI_W = 0xaU << 22,
+ ADDI_D = 0xbU << 22,
+ LU52I_D = 0xcU << 22,
+ ANDI = 0xdU << 22,
+ ORI = 0xeU << 22,
+ XORI = 0xfU << 22,
+
+ LD_B = 0xa0U << 22,
+ LD_H = 0xa1U << 22,
+ LD_W = 0xa2U << 22,
+ LD_D = 0xa3U << 22,
+ ST_B = 0xa4U << 22,
+ ST_H = 0xa5U << 22,
+ ST_W = 0xa6U << 22,
+ ST_D = 0xa7U << 22,
+ LD_BU = 0xa8U << 22,
+ LD_HU = 0xa9U << 22,
+ LD_WU = 0xaaU << 22,
+ FLD_S = 0xacU << 22,
+ FST_S = 0xadU << 22,
+ FLD_D = 0xaeU << 22,
+ FST_D = 0xafU << 22,
+
+ FMADD_S = 0x81U << 20,
+ FMADD_D = 0x82U << 20,
+ FMSUB_S = 0x85U << 20,
+ FMSUB_D = 0x86U << 20,
+ FNMADD_S = 0x89U << 20,
+ FNMADD_D = 0x8aU << 20,
+ FNMSUB_S = 0x8dU << 20,
+ FNMSUB_D = 0x8eU << 20,
+ FCMP_COND_S = 0xc1U << 20,
+ FCMP_COND_D = 0xc2U << 20,
+
+ BYTEPICK_D = 0x3U << 18,
+ BYTEPICK_W = 0x2U << 18,
+
+ FSEL = 0x340U << 18,
+
+ ALSL = 0x1U << 18,
+ ALSL_W = ALSL,
+ ALSL_WU = ALSL,
+
+ ALSL_D = 0xbU << 18,
+
+ SLLI_W = 0x40U << 16,
+ SRLI_W = 0x44U << 16,
+ SRAI_W = 0x48U << 16,
+ ROTRI_W = 0x4cU << 16,
+
+ SLLI_D = 0x41U << 16,
+ SRLI_D = 0x45U << 16,
+ SRAI_D = 0x49U << 16,
+ ROTRI_D = 0x4dU << 16,
+
+ SLLI = 0x10U << 18,
+ SRLI = 0x11U << 18,
+ SRAI = 0x12U << 18,
+ ROTRI = 0x13U << 18,
+
+ ADD_W = 0x20U << 15,
+ ADD_D = 0x21U << 15,
+ SUB_W = 0x22U << 15,
+ SUB_D = 0x23U << 15,
+ SLT = 0x24U << 15,
+ SLTU = 0x25U << 15,
+ MASKNEZ = 0x26U << 15,
+ MASKEQZ = 0x27U << 15,
+ NOR = 0x28U << 15,
+ AND = 0x29U << 15,
+ OR = 0x2aU << 15,
+ XOR = 0x2bU << 15,
+ ORN = 0x2cU << 15,
+ ANDN = 0x2dU << 15,
+ SLL_W = 0x2eU << 15,
+ SRL_W = 0x2fU << 15,
+ SRA_W = 0x30U << 15,
+ SLL_D = 0x31U << 15,
+ SRL_D = 0x32U << 15,
+ SRA_D = 0x33U << 15,
+ ROTR_W = 0x36U << 15,
+ ROTR_D = 0x37U << 15,
+ MUL_W = 0x38U << 15,
+ MULH_W = 0x39U << 15,
+ MULH_WU = 0x3aU << 15,
+ MUL_D = 0x3bU << 15,
+ MULH_D = 0x3cU << 15,
+ MULH_DU = 0x3dU << 15,
+ MULW_D_W = 0x3eU << 15,
+ MULW_D_WU = 0x3fU << 15,
+
+ DIV_W = 0x40U << 15,
+ MOD_W = 0x41U << 15,
+ DIV_WU = 0x42U << 15,
+ MOD_WU = 0x43U << 15,
+ DIV_D = 0x44U << 15,
+ MOD_D = 0x45U << 15,
+ DIV_DU = 0x46U << 15,
+ MOD_DU = 0x47U << 15,
+
+ BREAK = 0x54U << 15,
+
+ FADD_S = 0x201U << 15,
+ FADD_D = 0x202U << 15,
+ FSUB_S = 0x205U << 15,
+ FSUB_D = 0x206U << 15,
+ FMUL_S = 0x209U << 15,
+ FMUL_D = 0x20aU << 15,
+ FDIV_S = 0x20dU << 15,
+ FDIV_D = 0x20eU << 15,
+ FMAX_S = 0x211U << 15,
+ FMAX_D = 0x212U << 15,
+ FMIN_S = 0x215U << 15,
+ FMIN_D = 0x216U << 15,
+ FMAXA_S = 0x219U << 15,
+ FMAXA_D = 0x21aU << 15,
+ FMINA_S = 0x21dU << 15,
+ FMINA_D = 0x21eU << 15,
+ FSCALEB_S = 0x221U << 15,
+ FSCALEB_D = 0x222U << 15,
+ FCOPYSIGN_S = 0x225U << 15,
+ FCOPYSIGN_D = 0x226U << 15,
+
+ LDX_B = 0x7000U << 15,
+ LDX_H = 0x7008U << 15,
+ LDX_W = 0x7010U << 15,
+ LDX_D = 0x7018U << 15,
+ STX_B = 0x7020U << 15,
+ STX_H = 0x7028U << 15,
+ STX_W = 0x7030U << 15,
+ STX_D = 0x7038U << 15,
+ LDX_BU = 0x7040U << 15,
+ LDX_HU = 0x7048U << 15,
+ LDX_WU = 0x7050U << 15,
+ FLDX_S = 0x7060U << 15,
+ FLDX_D = 0x7068U << 15,
+ FSTX_S = 0x7070U << 15,
+ FSTX_D = 0x7078U << 15,
+
+ AMSWAP_W = 0x70c0U << 15,
+ AMSWAP_D = 0x70c1U << 15,
+ AMADD_W = 0x70c2U << 15,
+ AMADD_D = 0x70c3U << 15,
+ AMAND_W = 0x70c4U << 15,
+ AMAND_D = 0x70c5U << 15,
+ AMOR_W = 0x70c6U << 15,
+ AMOR_D = 0x70c7U << 15,
+ AMXOR_W = 0x70c8U << 15,
+ AMXOR_D = 0x70c9U << 15,
+ AMMAX_W = 0x70caU << 15,
+ AMMAX_D = 0x70cbU << 15,
+ AMMIN_W = 0x70ccU << 15,
+ AMMIN_D = 0x70cdU << 15,
+ AMMAX_WU = 0x70ceU << 15,
+ AMMAX_DU = 0x70cfU << 15,
+ AMMIN_WU = 0x70d0U << 15,
+ AMMIN_DU = 0x70d1U << 15,
+ AMSWAP_DB_W = 0x70d2U << 15,
+ AMSWAP_DB_D = 0x70d3U << 15,
+ AMADD_DB_W = 0x70d4U << 15,
+ AMADD_DB_D = 0x70d5U << 15,
+ AMAND_DB_W = 0x70d6U << 15,
+ AMAND_DB_D = 0x70d7U << 15,
+ AMOR_DB_W = 0x70d8U << 15,
+ AMOR_DB_D = 0x70d9U << 15,
+ AMXOR_DB_W = 0x70daU << 15,
+ AMXOR_DB_D = 0x70dbU << 15,
+ AMMAX_DB_W = 0x70dcU << 15,
+ AMMAX_DB_D = 0x70ddU << 15,
+ AMMIN_DB_W = 0x70deU << 15,
+ AMMIN_DB_D = 0x70dfU << 15,
+ AMMAX_DB_WU = 0x70e0U << 15,
+ AMMAX_DB_DU = 0x70e1U << 15,
+ AMMIN_DB_WU = 0x70e2U << 15,
+ AMMIN_DB_DU = 0x70e3U << 15,
+
+ DBAR = 0x70e4U << 15,
+ IBAR = 0x70e5U << 15,
+
+ CLO_W = 0X4U << 10,
+ CLZ_W = 0X5U << 10,
+ CTO_W = 0X6U << 10,
+ CTZ_W = 0X7U << 10,
+ CLO_D = 0X8U << 10,
+ CLZ_D = 0X9U << 10,
+ CTO_D = 0XaU << 10,
+ CTZ_D = 0XbU << 10,
+ REVB_2H = 0XcU << 10,
+ REVB_4H = 0XdU << 10,
+ REVB_2W = 0XeU << 10,
+ REVB_D = 0XfU << 10,
+ REVH_2W = 0X10U << 10,
+ REVH_D = 0X11U << 10,
+ BITREV_4B = 0X12U << 10,
+ BITREV_8B = 0X13U << 10,
+ BITREV_W = 0X14U << 10,
+ BITREV_D = 0X15U << 10,
+ EXT_W_H = 0X16U << 10,
+ EXT_W_B = 0X17U << 10,
+
+ FABS_S = 0X4501U << 10,
+ FABS_D = 0X4502U << 10,
+ FNEG_S = 0X4505U << 10,
+ FNEG_D = 0X4506U << 10,
+ FLOGB_S = 0X4509U << 10,
+ FLOGB_D = 0X450aU << 10,
+ FCLASS_S = 0X450dU << 10,
+ FCLASS_D = 0X450eU << 10,
+ FSQRT_S = 0X4511U << 10,
+ FSQRT_D = 0X4512U << 10,
+ FRECIP_S = 0X4515U << 10,
+ FRECIP_D = 0X4516U << 10,
+ FRSQRT_S = 0X4519U << 10,
+ FRSQRT_D = 0X451aU << 10,
+ FMOV_S = 0X4525U << 10,
+ FMOV_D = 0X4526U << 10,
+ MOVGR2FR_W = 0X4529U << 10,
+ MOVGR2FR_D = 0X452aU << 10,
+ MOVGR2FRH_W = 0X452bU << 10,
+ MOVFR2GR_S = 0X452dU << 10,
+ MOVFR2GR_D = 0X452eU << 10,
+ MOVFRH2GR_S = 0X452fU << 10,
+ MOVGR2FCSR = 0X4530U << 10,
+ MOVFCSR2GR = 0X4532U << 10,
+ MOVFR2CF = 0X4534U << 10,
+ MOVGR2CF = 0X4536U << 10,
+
+ FCVT_S_D = 0x4646U << 10,
+ FCVT_D_S = 0x4649U << 10,
+ FTINTRM_W_S = 0x4681U << 10,
+ FTINTRM_W_D = 0x4682U << 10,
+ FTINTRM_L_S = 0x4689U << 10,
+ FTINTRM_L_D = 0x468aU << 10,
+ FTINTRP_W_S = 0x4691U << 10,
+ FTINTRP_W_D = 0x4692U << 10,
+ FTINTRP_L_S = 0x4699U << 10,
+ FTINTRP_L_D = 0x469aU << 10,
+ FTINTRZ_W_S = 0x46a1U << 10,
+ FTINTRZ_W_D = 0x46a2U << 10,
+ FTINTRZ_L_S = 0x46a9U << 10,
+ FTINTRZ_L_D = 0x46aaU << 10,
+ FTINTRNE_W_S = 0x46b1U << 10,
+ FTINTRNE_W_D = 0x46b2U << 10,
+ FTINTRNE_L_S = 0x46b9U << 10,
+ FTINTRNE_L_D = 0x46baU << 10,
+ FTINT_W_S = 0x46c1U << 10,
+ FTINT_W_D = 0x46c2U << 10,
+ FTINT_L_S = 0x46c9U << 10,
+ FTINT_L_D = 0x46caU << 10,
+ FFINT_S_W = 0x4744U << 10,
+ FFINT_S_L = 0x4746U << 10,
+ FFINT_D_W = 0x4748U << 10,
+ FFINT_D_L = 0x474aU << 10,
+ FRINT_S = 0x4791U << 10,
+ FRINT_D = 0x4792U << 10,
+
+ MOVCF2FR = 0x4535U << 10,
+ MOVCF2GR = 0x4537U << 10
+};
+
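+// Note: the opcode values above are already shifted into their final bit
+// positions, so a complete instruction word is formed by OR-ing an opcode
+// with its encoded operand fields (see, e.g., rtCallRedirInstr below).
+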
+// ----- Emulated conditions.
+// On LOONG64 we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+enum Condition {
+ // Any value < 0 is considered no_condition.
+ kNoCondition = -1,
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+ ueq = 16, // Unordered or Equal.
+ ogl = 17, // Ordered and Not Equal.
+ cc_always = 18,
+
+ // Aliases.
+ carry = Uless,
+ not_carry = Ugreater_equal,
+ zero = equal,
+ eq = equal,
+ not_zero = not_equal,
+ ne = not_equal,
+ nz = not_equal,
+ sign = negative,
+ not_sign = positive,
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+ cc_default = kNoCondition
+};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
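+// For example, the conditions above are laid out in complementary pairs, so
+// NegateCondition(equal) yields not_equal (6 ^ 1 == 7) and
+// NegateCondition(less) yields greater_equal (12 ^ 1 == 13).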
+
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ueq:
+ return ogl;
+ case ogl:
+ return ueq;
+ default:
+ return cc;
+ }
+}
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+
+  CAF = 0x00,  // False, quiet.
+  SAF = 0x01,  // False, signaling.
+ CLT = 0x02, // Less Than quiet
+ // SLT = 0x03, // Less Than signaling
+ CEQ = 0x04,
+ SEQ = 0x05,
+ CLE = 0x06,
+ SLE = 0x07,
+ CUN = 0x08,
+ SUN = 0x09,
+ CULT = 0x0a,
+ SULT = 0x0b,
+ CUEQ = 0x0c,
+ SUEQ = 0x0d,
+ CULE = 0x0e,
+ SULE = 0x0f,
+ CNE = 0x10,
+ SNE = 0x11,
+ COR = 0x14,
+ SOR = 0x15,
+ CUNE = 0x18,
+ SUNE = 0x19,
+};
+
+const uint32_t kFPURoundingModeShift = 8;
+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest.
+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero.
+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity.
+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the LOONG64. They are defined so that they can
+// appear in shared function signatures, but will be ignored in LOONG64
+// implementations.
+enum Hint { no_hint = 0 };
+
+inline Hint NegateHint(Hint hint) { return no_hint; }
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-loong64.cc, as they use named
+// registers and other constants.
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = BREAK | call_rt_redirected;
+// A nop instruction. (Encoding of addi_w 0 0 0).
+const Instr nopInstr = ADDI_W;
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum Type {
+ kOp6Type,
+ kOp7Type,
+ kOp8Type,
+ kOp10Type,
+ kOp12Type,
+ kOp14Type,
+ kOp17Type,
+ kOp22Type,
+ kUnsupported = -1
+ };
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
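+  // For example, Bits(9, 5) extracts the 5-bit rj field, using the mask
+  // (2U << (9 - 5)) - 1 == 0x1f.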
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
+
+ // Safe to call within InstructionType().
+ inline int RjFieldRawNoAssert() const {
+ return InstructionBits() & kRjFieldMask;
+ }
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ inline int RjValue() const {
+ return this->Bits(kRjShift + kRjBits - 1, kRjShift);
+ }
+
+ inline int RkValue() const {
+ return this->Bits(kRkShift + kRkBits - 1, kRkShift);
+ }
+
+ inline int RdValue() const {
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int Sa2Value() const {
+ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift);
+ }
+
+ inline int Sa3Value() const {
+ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift);
+ }
+
+ inline int Ui5Value() const {
+ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift);
+ }
+
+ inline int Ui6Value() const {
+ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift);
+ }
+
+ inline int Ui12Value() const {
+ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift);
+ }
+
+ inline int LsbwValue() const {
+ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift);
+ }
+
+ inline int MsbwValue() const {
+ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift);
+ }
+
+ inline int LsbdValue() const {
+ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift);
+ }
+
+ inline int MsbdValue() const {
+ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift);
+ }
+
+ inline int CondValue() const {
+ return this->Bits(kCondShift + kCondBits - 1, kCondShift);
+ }
+
+ inline int Si12Value() const {
+ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift);
+ }
+
+ inline int Si14Value() const {
+ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift);
+ }
+
+ inline int Si16Value() const {
+ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift);
+ }
+
+ inline int Si20Value() const {
+ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift);
+ }
+
+ inline int FdValue() const {
+ return this->Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FaValue() const {
+ return this->Bits(kFaShift + kFaBits - 1, kFaShift);
+ }
+
+ inline int FjValue() const {
+ return this->Bits(kFjShift + kFjBits - 1, kFjShift);
+ }
+
+ inline int FkValue() const {
+ return this->Bits(kFkShift + kFkBits - 1, kFkShift);
+ }
+
+ inline int CjValue() const {
+ return this->Bits(kCjShift + kCjBits - 1, kCjShift);
+ }
+
+ inline int CdValue() const {
+ return this->Bits(kCdShift + kCdBits - 1, kCdShift);
+ }
+
+ inline int CaValue() const {
+ return this->Bits(kCaShift + kCaBits - 1, kCaShift);
+ }
+
+ inline int CodeValue() const {
+ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift);
+ }
+
+ inline int Hint5Value() const {
+ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift);
+ }
+
+ inline int Hint15Value() const {
+ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift);
+ }
+
+ inline int Offs16Value() const {
+ return this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ }
+
+ inline int Offs21Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
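+  // The 26-bit branch offset is split across the instruction word: the low 16
+  // bits sit in [25:10] and the high 10 bits in [9:0], so the two fields are
+  // recombined here (and similarly for the 21-bit offset above).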
+ inline int Offs26Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
+ inline int RjFieldRaw() const {
+ return this->InstructionBits() & kRjFieldMask;
+ }
+
+ inline int RkFieldRaw() const {
+ return this->InstructionBits() & kRkFieldMask;
+ }
+
+ inline int RdFieldRaw() const {
+ return this->InstructionBits() & kRdFieldMask;
+ }
+
+ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); }
+
+ /*TODO*/
+ inline int32_t Imm12Value() const { abort(); }
+
+ inline int32_t Imm14Value() const { abort(); }
+
+ inline int32_t Imm16Value() const { abort(); }
+
+ // Say if the instruction is a break.
+ bool IsTrap() const;
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+// -----------------------------------------------------------------------------
+// LOONG64 assembly various constants.
+
+const int kInvalidStackOffset = -1;
+
+static const int kNegOffset = 0x00008000;
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ InstructionBase::Type kType = kUnsupported;
+
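+  // The encoding is classified by testing progressively longer opcode
+  // prefixes, from the 6-bit major opcodes up to 22-bit opcodes, until one of
+  // the known groups matches.
+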
+ // Check for kOp6Type
+ switch (Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ case BEQZ:
+ case BNEZ:
+ case BCZ:
+ case JIRL:
+ case B:
+ case BL:
+ case BEQ:
+ case BNE:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ kType = kOp6Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp7Type
+ switch (Bits(31, 25) << 25) {
+ case LU12I_W:
+ case LU32I_D:
+ case PCADDI:
+ case PCALAU12I:
+ case PCADDU12I:
+ case PCADDU18I:
+ kType = kOp7Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp8Type
+ switch (Bits(31, 24) << 24) {
+ case LDPTR_W:
+ case STPTR_W:
+ case LDPTR_D:
+ case STPTR_D:
+ case LL_W:
+ case SC_W:
+ case LL_D:
+ case SC_D:
+ kType = kOp8Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp10Type
+ switch (Bits(31, 22) << 22) {
+ case BSTR_W: {
+ // If Bit(21) = 0, then the Opcode is not BSTR_W.
+ if (Bit(21) == 0)
+ kType = kUnsupported;
+ else
+ kType = kOp10Type;
+ break;
+ }
+ case BSTRINS_D:
+ case BSTRPICK_D:
+ case SLTI:
+ case SLTUI:
+ case ADDI_W:
+ case ADDI_D:
+ case LU52I_D:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LD_B:
+ case LD_H:
+ case LD_W:
+ case LD_D:
+ case ST_B:
+ case ST_H:
+ case ST_W:
+ case ST_D:
+ case LD_BU:
+ case LD_HU:
+ case LD_WU:
+ case FLD_S:
+ case FST_S:
+ case FLD_D:
+ case FST_D:
+ kType = kOp10Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp12Type
+ switch (Bits(31, 20) << 20) {
+ case FMADD_S:
+ case FMADD_D:
+ case FMSUB_S:
+ case FMSUB_D:
+ case FNMADD_S:
+ case FNMADD_D:
+ case FNMSUB_S:
+ case FNMSUB_D:
+ case FCMP_COND_S:
+ case FCMP_COND_D:
+ case FSEL:
+ kType = kOp12Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp14Type
+ switch (Bits(31, 18) << 18) {
+ case ALSL:
+ case BYTEPICK_W:
+ case BYTEPICK_D:
+ case ALSL_D:
+ case SLLI:
+ case SRLI:
+ case SRAI:
+ case ROTRI:
+ kType = kOp14Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp17Type
+ switch (Bits(31, 15) << 15) {
+ case ADD_W:
+ case ADD_D:
+ case SUB_W:
+ case SUB_D:
+ case SLT:
+ case SLTU:
+ case MASKEQZ:
+ case MASKNEZ:
+ case NOR:
+ case AND:
+ case OR:
+ case XOR:
+ case ORN:
+ case ANDN:
+ case SLL_W:
+ case SRL_W:
+ case SRA_W:
+ case SLL_D:
+ case SRL_D:
+ case SRA_D:
+ case ROTR_D:
+ case ROTR_W:
+ case MUL_W:
+ case MULH_W:
+ case MULH_WU:
+ case MUL_D:
+ case MULH_D:
+ case MULH_DU:
+ case MULW_D_W:
+ case MULW_D_WU:
+ case DIV_W:
+ case MOD_W:
+ case DIV_WU:
+ case MOD_WU:
+ case DIV_D:
+ case MOD_D:
+ case DIV_DU:
+ case MOD_DU:
+ case BREAK:
+ case FADD_S:
+ case FADD_D:
+ case FSUB_S:
+ case FSUB_D:
+ case FMUL_S:
+ case FMUL_D:
+ case FDIV_S:
+ case FDIV_D:
+ case FMAX_S:
+ case FMAX_D:
+ case FMIN_S:
+ case FMIN_D:
+ case FMAXA_S:
+ case FMAXA_D:
+ case FMINA_S:
+ case FMINA_D:
+ case LDX_B:
+ case LDX_H:
+ case LDX_W:
+ case LDX_D:
+ case STX_B:
+ case STX_H:
+ case STX_W:
+ case STX_D:
+ case LDX_BU:
+ case LDX_HU:
+ case LDX_WU:
+ case FLDX_S:
+ case FLDX_D:
+ case FSTX_S:
+ case FSTX_D:
+ case AMSWAP_W:
+ case AMSWAP_D:
+ case AMADD_W:
+ case AMADD_D:
+ case AMAND_W:
+ case AMAND_D:
+ case AMOR_W:
+ case AMOR_D:
+ case AMXOR_W:
+ case AMXOR_D:
+ case AMMAX_W:
+ case AMMAX_D:
+ case AMMIN_W:
+ case AMMIN_D:
+ case AMMAX_WU:
+ case AMMAX_DU:
+ case AMMIN_WU:
+ case AMMIN_DU:
+ case AMSWAP_DB_W:
+ case AMSWAP_DB_D:
+ case AMADD_DB_W:
+ case AMADD_DB_D:
+ case AMAND_DB_W:
+ case AMAND_DB_D:
+ case AMOR_DB_W:
+ case AMOR_DB_D:
+ case AMXOR_DB_W:
+ case AMXOR_DB_D:
+ case AMMAX_DB_W:
+ case AMMAX_DB_D:
+ case AMMIN_DB_W:
+ case AMMIN_DB_D:
+ case AMMAX_DB_WU:
+ case AMMAX_DB_DU:
+ case AMMIN_DB_WU:
+ case AMMIN_DB_DU:
+ case DBAR:
+ case IBAR:
+ case FSCALEB_S:
+ case FSCALEB_D:
+ case FCOPYSIGN_S:
+ case FCOPYSIGN_D:
+ kType = kOp17Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp22Type
+ switch (Bits(31, 10) << 10) {
+ case CLZ_W:
+ case CTZ_W:
+ case CLZ_D:
+ case CTZ_D:
+ case REVB_2H:
+ case REVB_4H:
+ case REVB_2W:
+ case REVB_D:
+ case REVH_2W:
+ case REVH_D:
+ case BITREV_4B:
+ case BITREV_8B:
+ case BITREV_W:
+ case BITREV_D:
+ case EXT_W_B:
+ case EXT_W_H:
+ case FABS_S:
+ case FABS_D:
+ case FNEG_S:
+ case FNEG_D:
+ case FSQRT_S:
+ case FSQRT_D:
+ case FMOV_S:
+ case FMOV_D:
+ case MOVGR2FR_W:
+ case MOVGR2FR_D:
+ case MOVGR2FRH_W:
+ case MOVFR2GR_S:
+ case MOVFR2GR_D:
+ case MOVFRH2GR_S:
+ case MOVGR2FCSR:
+ case MOVFCSR2GR:
+ case FCVT_S_D:
+ case FCVT_D_S:
+ case FTINTRM_W_S:
+ case FTINTRM_W_D:
+ case FTINTRM_L_S:
+ case FTINTRM_L_D:
+ case FTINTRP_W_S:
+ case FTINTRP_W_D:
+ case FTINTRP_L_S:
+ case FTINTRP_L_D:
+ case FTINTRZ_W_S:
+ case FTINTRZ_W_D:
+ case FTINTRZ_L_S:
+ case FTINTRZ_L_D:
+ case FTINTRNE_W_S:
+ case FTINTRNE_W_D:
+ case FTINTRNE_L_S:
+ case FTINTRNE_L_D:
+ case FTINT_W_S:
+ case FTINT_W_D:
+ case FTINT_L_S:
+ case FTINT_L_D:
+ case FFINT_S_W:
+ case FFINT_S_L:
+ case FFINT_D_W:
+ case FFINT_D_L:
+ case FRINT_S:
+ case FRINT_D:
+ case MOVFR2CF:
+ case MOVCF2FR:
+ case MOVGR2CF:
+ case MOVCF2GR:
+ case FRECIP_S:
+ case FRECIP_D:
+ case FRSQRT_S:
+ case FRSQRT_D:
+ case FCLASS_S:
+ case FCLASS_D:
+ case FLOGB_S:
+ case FLOGB_D:
+ case CLO_W:
+ case CTO_W:
+ case CLO_D:
+ case CTO_D:
+ kType = kOp22Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ return kType;
+}
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
diff --git a/chromium/v8/src/codegen/loong64/cpu-loong64.cc b/chromium/v8/src/codegen/loong64/cpu-loong64.cc
new file mode 100644
index 00000000000..6b4040676d3
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/cpu-loong64.cc
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for LoongArch independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/cpu-features.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+#if defined(V8_HOST_ARCH_LOONG64)
+  // Nothing to do if there are no instructions to flush.
+ if (size == 0) {
+ return;
+ }
+
+#if defined(ANDROID) && !defined(__LP64__)
+  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+ char* end = reinterpret_cast<char*>(start) + size;
+ cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
+ 0);
+#else // ANDROID
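+  // ibar is the LoongArch instruction-fetch barrier; it resynchronizes the
+  // instruction stream after the code has been modified.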
+ asm("ibar 0\n");
+#endif // ANDROID
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/chromium/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/chromium/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
new file mode 100644
index 00000000000..7947c97dc3a
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -0,0 +1,278 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+  if (argc >= 1) DCHECK(allocatable_regs & a0.bit());
+  if (argc >= 2) DCHECK(allocatable_regs & a1.bit());
+  if (argc >= 3) DCHECK(allocatable_regs & a2.bit());
+  if (argc >= 4) DCHECK(allocatable_regs & a3.bit());
+  if (argc >= 5) DCHECK(allocatable_regs & a4.bit());
+  if (argc >= 6) DCHECK(allocatable_regs & a5.bit());
+  if (argc >= 7) DCHECK(allocatable_regs & a6.bit());
+  if (argc >= 8) DCHECK(allocatable_regs & a7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
+// static
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, a5, a4, a2, a0, a3);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+  // a1 : the target callable to call
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, a4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+  // a0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(a0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
+
+#endif // V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
diff --git a/chromium/v8/src/codegen/loong64/macro-assembler-loong64.cc b/chromium/v8/src/codegen/loong64/macro-assembler-loong64.cc
new file mode 100644
index 00000000000..6c1fa8e7298
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -0,0 +1,4108 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/heap-number.h"
+#include "src/runtime/runtime.h"
+#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/codegen/loong64/macro-assembler-loong64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
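+// Returns true when the operand is the zero register or the immediate 0.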
+static inline bool IsZero(const Operand& rk) {
+ if (rk.is_reg()) {
+ return rk.rm() == zero_reg;
+ } else {
+ return rk.immediate() == 0;
+ }
+}
+
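+// Returns the number of bytes PushCallerSaved/PopCallerSaved would move for
+// the given FP mode, excluding up to three caller-specified registers.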
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
+}
+
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+}
+
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Add_d(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mov(fp, sp);
+ }
+}
+
+void TurboAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
+ offset += 2 * kPointerSize;
+ } else {
+ Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
+ offset += kPointerSize;
+ }
+ Add_d(fp, sp, Operand(offset));
+}
+
+// Clobbers object, value, and ra, if (ra_status == kRAHasBeenSaved).
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == SmiCheck::kInline) {
+ JumpIfSmi(value, &done);
+ }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ if (FLAG_debug_code) {
+ Label ok;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset - kHeapObjectTag);
+ And(scratch, scratch, Operand(kPointerSize - 1));
+ Branch(&ok, eq, scratch, Operand(zero_reg));
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
+ bind(&ok);
+ }
+
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
+ save_fp, remembered_set_action, SmiCheck::kOmit);
+
+ bind(&done);
+}
+
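+// Pushes/pops every general-purpose register whose bit is set in |registers|;
+// a no-op when the list is empty.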
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPush(regs);
+}
+
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPop(regs);
+}
+
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode) {
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
+ RelocInfo::CODE_TARGET);
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+#else
+ if (false) {
+#endif
+ } else {
+ auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+ }
+}
+
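+// Computes dst_slot = object + offset and dst_object = object, handling the
+// cases where the destination registers alias the inputs.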
+void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset) {
+ DCHECK_NE(dst_object, dst_slot);
+ // If `offset` is a register, it cannot overlap with `object`.
+ DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
+
+ // If the slot register does not overlap with the object register, we can
+ // overwrite it.
+ if (dst_slot != object) {
+ Add_d(dst_slot, object, offset);
+ mov(dst_object, object);
+ return;
+ }
+
+ DCHECK_EQ(dst_slot, object);
+
+ // If the destination object register does not overlap with the offset
+ // register, we can overwrite it.
+ if (offset.IsImmediate() || (offset.rm() != dst_object)) {
+ mov(dst_object, dst_slot);
+ Add_d(dst_slot, dst_slot, offset);
+ return;
+ }
+
+ DCHECK_EQ(dst_object, offset.rm());
+
+ // We only have `dst_slot` and `dst_object` left as distinct registers so we
+  // have to swap them. We write this as an add+sub sequence to avoid using a
+ // scratch register.
+ Add_d(dst_slot, dst_slot, dst_object);
+ Sub_d(dst_object, dst_slot, dst_object);
+}
+
+// Clobbers object, value, and ra, if (ra_status == kRAHasBeenSaved).
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Operand offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(!AreAliased(object, value));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset);
+ Ld_d(scratch, MemOperand(scratch, 0));
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Operand(value));
+ }
+
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
+ !FLAG_incremental_marking) ||
+ FLAG_disable_write_barriers) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == SmiCheck::kInline) {
+ DCHECK_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
+
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
+ &done);
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ Push(ra);
+ }
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ DCHECK(offset.IsImmediate());
+ Add_d(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
+ if (ra_status == kRAHasNotBeenSaved) {
+ Pop(ra);
+ }
+
+ bind(&done);
+}
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
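+// Each macro below accepts either a register or an immediate operand;
+// immediates that do not fit the instruction encoding are first materialized
+// into a scratch register with li.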
+void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_w(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_w(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_w(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_d(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_d(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_w(rd, rj, rk.rm());
+ } else {
+ DCHECK(is_int32(rk.immediate()));
+ if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_w instr, use addi_w(x, y, -imm).
+ addi_w(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) {
+        // Use load -imm and add_w when loading -imm generates one instruction.
+ li(scratch, -rk.immediate());
+ add_w(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ li(scratch, rk);
+ sub_w(rd, rj, scratch);
+ }
+ }
+ }
+}
+
+void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_d(rd, rj, rk.rm());
+ } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_d instr, use addi_d(x, y, -imm).
+ addi_d(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ DCHECK(rj != t7);
+ int li_count = InstrCountForLi64Bit(rk.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-rk.immediate());
+ if (li_neg_count < li_count && !MustUseReg(rk.rmode())) {
+ // Use load -imm and add_d when loading -imm generates one instruction.
+ DCHECK(rk.immediate() != std::numeric_limits<int32_t>::min());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(-rk.immediate()));
+ add_d(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rk);
+ sub_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ and_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ andi(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ and_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ or_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ ori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ or_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ xor_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ xori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ xor_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ nor(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ nor(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ andn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ andn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ orn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ orn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Neg(Register rj, const Operand& rk) {
+ DCHECK(rk.is_reg());
+ sub_d(rj, zero_reg, rk.rm());
+}
+
+void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ slti(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ sltui(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) {
+ Slt(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
+ Sltu(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_w(rd, rj, rk.rm());
+ } else {
+ int64_t ror_value = rk.immediate() % 32;
+ if (ror_value < 0) {
+ ror_value += 32;
+ }
+ rotri_w(rd, rj, ror_value);
+ }
+}
+
+void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_d(rd, rj, rk.rm());
+ } else {
+ int64_t dror_value = rk.immediate() % 64;
+ if (dror_value < 0) dror_value += 64;
+ rotri_d(rd, rj, dror_value);
+ }
+}
+
+void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
+ if (sa <= 4) {
+ alsl_w(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_w(tmp, rj, sa);
+ add_w(rd, rk, tmp);
+ }
+}
+
+void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
+ if (sa <= 4) {
+ alsl_d(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_d(tmp, rj, sa);
+ add_d(rd, rk, tmp);
+ }
+}
+
+// ------------Pseudo-instructions-------------
+
+// Change endianness
+void TurboAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ ext_w_h(dest, dest);
+ } else if (operand_size == 4) {
+ revb_2w(dest, src);
+ slli_w(dest, dest, 0);
+ } else {
+    revb_d(dest, src);
+ }
+}
+
+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ bstrins_d(dest, zero_reg, 63, 16);
+ } else {
+ revb_2w(dest, src);
+ bstrins_d(dest, zero_reg, 63, 32);
+ }
+}
+
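+// The load/store macros below normalize the MemOperand with
+// AdjustBaseAndOffset and then choose between the indexed (ldx_*/stx_*) and
+// immediate-offset forms; the 32/64-bit variants also use ldptr/stptr for
+// word-aligned 16-bit offsets.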
+void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_b(rd, source.base(), source.index());
+ } else {
+ ld_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_bu(rd, source.base(), source.index());
+ } else {
+ ld_bu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_b(rd, source.base(), source.index());
+ } else {
+ st_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_h(rd, source.base(), source.index());
+ } else {
+ ld_h(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_hu(rd, source.base(), source.index());
+ } else {
+ ld_hu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_h(rd, source.base(), source.index());
+ } else {
+ st_h(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_w(rd, source.base(), source.index());
+ } else {
+ ld_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_wu(rd, source.base(), source.index());
+ } else {
+ ld_wu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_w(rd, source.base(), source.index());
+ } else {
+ st_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_d(rd, source.base(), source.index());
+ } else {
+ ld_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_d(rd, source.base(), source.index());
+ } else {
+ st_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_s(fd, tmp.base(), tmp.index());
+ } else {
+ fld_s(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_s(fs, tmp.base(), tmp.index());
+ } else {
+ fst_s(fs, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_d(fd, tmp.base(), tmp.index());
+ } else {
+ fld_d(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_d(fs, tmp.base(), tmp.index());
+ } else {
+ fst_d(fs, tmp.base(), tmp.offset());
+ }
+}
+
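+// Ll_*/Sc_* emit the load-linked/store-conditional pair used for atomic
+// sequences; offsets that do not fit 14 bits are added to the base in a
+// scratch register first.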
+void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
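+// Number of instructions li needs for the low 32 bits of |value|: 1 when a
+// single addi_d, ori or lu12i_w suffices, otherwise 2 (lu12i_w + ori).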
+static inline int InstrCountForLiLower32Bit(int64_t value) {
+ if (is_int12(static_cast<int32_t>(value)) ||
+ is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ if (is_int12(static_cast<int32_t>(j.immediate()))) {
+ addi_d(rd, zero_reg, j.immediate());
+ } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
+ ori(rd, zero_reg, j.immediate() & kImm12Mask);
+ } else {
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ if (j.immediate() & kImm12Mask) {
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ }
+ }
+}
+
+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+ if (is_int32(value)) {
+ return InstrCountForLiLower32Bit(value);
+ } else if (is_int52(value)) {
+ return InstrCountForLiLower32Bit(value) + 1;
+ } else if ((value & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
+ if (tzc >= 20) {
+ return 1;
+ } else if (tzc + lzc > 12) {
+ return 2;
+ } else {
+ return 3;
+ }
+ } else {
+ int64_t imm21 = (value >> 31) & 0x1fffffL;
+ if (imm21 != 0x1fffffL && imm21 != 0) {
+ return InstrCountForLiLower32Bit(value) + 2;
+ } else {
+ return InstrCountForLiLower32Bit(value) + 1;
+ }
+ }
+ UNREACHABLE();
+ return INT_MAX;
+}
+
+// All changes to if...else conditions here must be added to
+// InstrCountForLi64Bit as well.
+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ DCHECK(!MustUseReg(j.rmode()));
+ DCHECK(mode == OPTIMIZE_SIZE);
+ int64_t imm = j.immediate();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int32(imm)) {
+ LiLower32BitHelper(rd, j);
+ } else if (is_int52(imm)) {
+ LiLower32BitHelper(rd, j);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ } else if ((imm & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32);
+ if (tzc >= 20) {
+ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask);
+ } else if (tzc + lzc > 12) {
+ int32_t mask = (1 << (32 - tzc)) - 1;
+ lu12i_w(rd, imm >> (tzc + 32) & mask);
+ slli_d(rd, rd, tzc + 20);
+ } else {
+ xor_(rd, rd, rd);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+ } else {
+ int64_t imm21 = (imm >> 31) & 0x1fffffL;
+ LiLower32BitHelper(rd, j);
+ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+}
+
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
+ li_optimized(rd, j, mode);
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int64_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (MustUseReg(j.rmode())) {
+ int64_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (mode == ADDRESS_LOAD) {
+ // We always need the same number of instructions as we may need to patch
+ // this code to load another value which may need all 3 instructions.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ } else { // mode == CONSTANT_SIZE - always emit the same instruction
+ // sequence.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask);
+ }
+}
+
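+// MultiPush stores the requested registers below sp (iterating from the
+// highest register code down) and adjusts sp once at the end; MultiPop
+// restores them in the opposite order.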
+void TurboAssembler::MultiPush(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs3 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs3 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPushFPU(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Sub_d(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+void TurboAssembler::MultiPopFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 32);
+ DCHECK_LT(msbw, 32);
+ bstrpick_w(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 64);
+ DCHECK_LT(msbw, 64);
+ bstrpick_d(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
+
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
+
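+// The Ffint_*_uw/_ul helpers convert unsigned 32/64-bit integers to floating
+// point. The _ul variants halve a value whose MSB is set (keeping the lost
+// bit for rounding) before the signed conversion and then double the result.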
+void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_s(t8, fj);
+ Ffint_d_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_d_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label msb_clear, conversion_done;
+
+ Branch(&msb_clear, ge, rj, Operand(zero_reg));
+
+  // rj >= 2^63.
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+ fadd_d(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&msb_clear);
+  // rj < 2^63, we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rj, Operand(zero_reg));
+
+  // rj >= 2^63.
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+ fadd_s(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&positive);
+  // rj < 2^63, we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_s_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ ftintrne_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ ftintrm_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ ftintrp_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ ftintrz_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Load to GPR.
+ movfr2gr_d(t8, fj);
+ // Reset sign bit.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x7FFFFFFFFFFFFFFFl);
+ and_(t8, t8, scratch1);
+ }
+ movgr2fr_d(scratch, t8);
+ Ftintrz_l_d(fd, scratch);
+}
+
+void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_d(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_s(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_d(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_s(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ ftintrz_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ ftintrne_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ ftintrm_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ ftintrp_w_d(fd, fj);
+}
+
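+// Truncates a double to an unsigned 32-bit integer. Values >= 2^31 are
+// converted by subtracting 2^31 first and setting bit 31 of the result
+// afterwards.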
+void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+
+ {
+    // Load 2^31 into scratch as its double representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x41E00000);
+ movgr2fr_w(scratch, zero_reg);
+ movgr2frh_w(scratch, scratch1);
+ }
+  // Test if scratch > fj.
+  // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^31 from fj, then truncate it to rd
+  // and add 2^31 back to rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_w_d(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_d(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
+void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+ {
+ // Load 2^31 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x4F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+  // Test if scratch > fj.
+  // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^31 from fj, then truncate it to rd
+  // and add 2^31 back to rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_w_s(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_s(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
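+// Truncates a double to an unsigned 64-bit integer. Values >= 2^63 are
+// converted by subtracting 2^63 first and setting bit 63 afterwards. If
+// |result| is valid it is set to 1 on success and 0 on failure.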
+void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+    // If fj <= -1 or unordered, the conversion fails.
+ CompareF64(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF64(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(t7, 0x43E0000000000000);
+ movgr2fr_d(scratch, t7);
+
+  // Test if scratch > fj.
+  // If fj < 2^63 we can convert it normally.
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^63 from fj, then truncate it to rd
+  // and add 2^63 back to rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_l_d(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_d(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion failed if the result is negative.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+      srli_d(scratch1, scratch1, 1);  // scratch1 = 0x7FFFFFFFFFFFFFFF.
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+    // If fj <= -1 or unordered, the conversion fails.
+ CompareF32(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF32(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ {
+ // Load 2^63 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x5F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+
+  // Test if scratch > fj.
+  // If fj < 2^63 we can convert it normally.
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^63 from fj, then truncate it to rd
+  // and add 2^63 back to rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_l_s(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_s(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion failed if the result is negative or unordered.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+      srli_d(scratch1, scratch1, 1);  // scratch1 = 0x7FFFFFFFFFFFFFFF.
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
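+// Rounds by temporarily switching the FCSR rounding mode, executing frint,
+// and then restoring the original FCSR contents.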
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_d(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round);
+}
+
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_s(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round);
+}
+
+void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
+ FPUCondition cc, CFRegister cd, bool f32) {
+ if (f32) {
+ fcmp_cond_s(cc, cmp1, cmp2, cd);
+ } else {
+ fcmp_cond_d(cc, cmp1, cmp2, cd);
+ }
+}
+
+void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd, bool f32) {
+ CompareF(cmp1, cmp2, CUN, cd, f32);
+}
+
+void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
+ bcnez(cj, target);
+}
+
+void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
+ bceqz(cj, target);
+}
+
+void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
+  // TODO(yuyin): can be optimized
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(target, cj);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target, cj);
+ }
+}
+
+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(src_low != scratch);
+ movfrh2gr_s(scratch, dst);
+ movgr2fr_w(dst, src_low);
+ movgr2frh_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int32_t>(src)));
+ movgr2fr_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
+ // Handle special values first.
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
+ fmov_d(dst, kDoubleRegZero);
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int64_t>(src)));
+ movgr2fr_d(dst, scratch);
+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
+ }
+}
+
+void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ maskeqz(scratch, rj, rk);
+ masknez(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ masknez(scratch, rj, rk);
+ maskeqz(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
+ const Operand& rk, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionZero(rd, rk.rm());
+ } else if (rk.immediate() == 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionZero(rd, t7);
+ }
+ break;
+ case ne:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rk.rm());
+ } else if (rk.immediate() != 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionNotZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ break;
+ case greater_equal:
+ Sge(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case less:
+ Slt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case less_equal:
+ Sle(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj > rk
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case Uless:
+ Sltu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case Uless_equal:
+ Sleu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+ default:
+ UNREACHABLE();
+  }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ maskeqz(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ masknez(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionNotZero(dest, scratch);
+}
+
+void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionZero(dest, scratch);
+}
+
+void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
+
+void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
+
+void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
+
+void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
+
+// TODO(LOONG_dev): Optimize like arm64, use simd instruction
+void TurboAssembler::Popcnt_w(Register rd, Register rj) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+ int32_t B0 = 0x55555555; // (T)~(T)0/3
+ int32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ int32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_w(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_w(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_w(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_w(scratch, rd, scratch);
+ srli_w(rd, scratch, 4);
+ Add_w(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_w(rd, rd, scratch);
+ srli_w(rd, rd, shift);
+}
+
+void TurboAssembler::Popcnt_d(Register rd, Register rj) {
+ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ int64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_d(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_d(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_d(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_d(scratch, rd, scratch);
+ srli_d(rd, scratch, 4);
+ Add_d(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_d(rd, rd, scratch);
+ srli_d(rd, rd, shift);
+}
+
+void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
+ int size, bool sign_extend) {
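+  // Sketch of the intent: dest = (source >> pos), keeping only the low `size`
+  // bits and optionally sign-extending them (only 8/16/32-bit fields are
+  // supported for sign extension below).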
+ sra_d(dest, source, pos);
+ bstrpick_d(dest, dest, size - 1, 0);
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ ext_w_b(dest, dest);
+ break;
+ case 16:
+ ext_w_h(dest, dest);
+ break;
+ case 32:
+ // sign-extend word
+ slli_w(dest, dest, 0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+ int size) {
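+  // Sketch of the approach (illustrative): rotate dest right by pos so the
+  // target field starts at bit 0, overwrite its low `size` bits with source,
+  // then rotate back (rotate right by -pos, i.e. left by pos, modulo 64).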
+ Rotr_d(dest, dest, pos);
+ bstrins_d(dest, source, size - 1, 0);
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, zero_reg, pos);
+ Rotr_d(dest, dest, scratch);
+ }
+}
+
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+
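+  // Sketch of the range check below (illustrative): scratch equals
+  // rotri(scratch, 1) only when all of its bits are equal, i.e. when the
+  // truncated value is INT64_MIN or INT64_MAX; any other value is treated as
+  // an exact conversion, and the branch to `done` is taken with the low
+  // 32 bits already in `result`.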
+ ftintrz_l_d(single_scratch, double_input);
+ movfr2gr_d(scratch2, single_scratch);
+ li(scratch, 1L << 63);
+ Xor(scratch, scratch, scratch2);
+ rotri_d(scratch2, scratch, 1);
+ movfr2gr_s(result, single_scratch);
+ Branch(done, ne, scratch, Operand(scratch2));
+
+ // Truncate NaN to zero.
+ CompareIsNanF64(double_input, double_input);
+ Move(result, zero_reg);
+ bcnez(FCC0, done);
+}
+
+void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+ Register result,
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub instead.
+ Sub_d(sp, sp,
+ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack.
+ St_d(ra, MemOperand(sp, kSystemPointerSize));
+ Fst_d(double_input, MemOperand(sp, 0));
+
+#if V8_ENABLE_WEBASSEMBLY
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+  // For balance with the #if branch above.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
+
+ Pop(ra, result);
+ bind(&done);
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rj, rk) \
+ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \
+ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg)))
+
+void TurboAssembler::Branch(Label* L, bool need_link) {
+ int offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ if (L->is_bound()) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ } else {
+ BranchShort(L, cond, rj, rk, need_link);
+ }
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(L, cond, rj, Operand(scratch));
+}
+
+int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) {
+ return branch_offset_helper(L, bits) >> 2;
+}
+
+Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk,
+ Register scratch) {
+ Register r2 = no_reg;
+ if (rk.is_reg()) {
+ r2 = rk.rm();
+ } else {
+ r2 = scratch;
+ li(r2, rk);
+ }
+
+ return r2;
+}
+
+bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond,
+ Register rj, const Operand& rk,
+ bool need_link) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK_NE(rj, zero_reg);
+
+ // Be careful to always use shifted_branch_offset only just before the
+  // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int offset = 0;
+ switch (cond) {
+ case cc_always:
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+ break;
+ case eq:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // beq is used here to make the code patchable. Otherwise b should
+          // be used, which has no condition field and so is not patchable.
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ beqz(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, sc, offset);
+ }
+ break;
+ case ne:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, sc, offset);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(sc, rj, offset);
+ }
+ break;
+ case greater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, sc, offset);
+ }
+ break;
+ case less:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, sc, offset);
+ }
+ break;
+ case less_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(sc, rj, offset);
+ }
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(sc, rj, offset);
+ }
+ break;
+ case Ugreater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(rj, sc, offset);
+ }
+ break;
+ case Uless:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ // No code needs to be emitted.
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(rj, sc, offset);
+ }
+ break;
+ case Uless_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ beqz(rj, L);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(sc, rj, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return true;
+}
+
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
+ DCHECK(result);
+ USE(result);
+}
+
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
+ Ld_d(destination,
+ FieldMemOperand(destination, FixedArray::kHeaderSize +
+ constant_index * kPointerSize));
+}
+
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld_d(destination, MemOperand(kRootRegister, offset));
+}
+
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Add_d(destination, kRootRegister, Operand(offset));
+ }
+}
+
+void TurboAssembler::Jump(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(zero_reg, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(zero_reg, target, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, Operand(target, rmode));
+ jirl(zero_reg, t7, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
+}
+
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ b(code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ Ld_d(t7, MemOperand(kRootRegister, offset));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ Jump(static_cast<intptr_t>(code.address()), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::Jump(const ExternalReference& reference) {
+ li(t7, reference);
+ Jump(t7);
+}
+
+// Note: To call gcc-compiled C code on LoongArch, you must call through t[0-8].
+void TurboAssembler::Call(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(ra, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(ra, target, 0);
+ bind(&skip);
+ }
+ set_last_call_pc_(pc_);
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
+ }
+}
+
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+ intptr_t offset_diff = target - pc_offset();
+ if (RelocInfo::IsNone(rmode) && is_int28(offset_diff)) {
+ bl(offset_diff >> 2);
+ } else if (RelocInfo::IsNone(rmode) && is_int38(offset_diff)) {
+ pcaddu18i(t7, static_cast<int32_t>(offset_diff) >> 18);
+ jirl(ra, t7, (offset_diff & 0x3ffff) >> 2);
+ } else {
+ li(t7, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
+ Call(t7, cc_always, rj, rk);
+ }
+ bind(&skip);
+}
+
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordCommentForOffHeapTrampoline(builtin);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ bl(code_target_index);
+ set_last_call_pc_(pc_);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadRootRelative(t7, offset);
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(code->IsExecutable());
+ Call(code.address(), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2,
+ t7);
+ Ld_d(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ld_d(destination, EntryFromBuiltinAsOperand(builtin));
+}
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::BuiltinEntrySlotOffset(builtin));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
+}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
+ if (FLAG_code_comments) RecordComment("]");
+}
+
+void TurboAssembler::PatchAndJump(Address target) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
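+  // Sketch of the layout (illustrative): the 64-bit target is stored inline
+  // right after the four instructions emitted below; pcaddi computes its
+  // address (pc + 16 bytes) and the Ld_d/jirl pair jumps through it.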
+ pcaddi(scratch, 4);
+ Ld_d(t7, MemOperand(scratch, 0));
+ jirl(zero_reg, t7, 0);
+ nop();
+ DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
+  *reinterpret_cast<uint64_t*>(pc_) = target; // pc_ should be aligned.
+ pc_ += sizeof(uint64_t);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 2;
+ Label find_ra;
+  // Adjust the value in ra to point to the correct return location, the 2nd
+  // instruction past the real call into C code (the jirl), and push it.
+ // This is the return address of the exit frame.
+ pcaddi(ra, kNumInstructionsToJump + 1);
+ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ St_d(ra, MemOperand(sp, 0));
+ // Stack is still aligned.
+
+ // TODO(LOONG_dev): can be jirl target? a0 -- a7?
+ jirl(zero_reg, target, 0);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
+void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
+ Jump(ra, cond, rj, rk);
+}
+
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ Add_d(sp, sp, Operand(count * kPointerSize));
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+ if (scratch == no_reg) {
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
+}
+
+void TurboAssembler::Call(Label* target) { Branch(target, true); }
+
+void TurboAssembler::Push(Smi smi) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(smi));
+ Push(scratch);
+}
+
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(handle));
+ Push(scratch);
+}
+
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order) {
+ DCHECK(!AreAliased(array, size, scratch, scratch2));
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mov(scratch, zero_reg);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ Add_d(scratch, scratch, Operand(1));
+ bind(&entry);
+ Branch(&loop, less, scratch, Operand(size));
+ } else {
+ mov(scratch, size);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ bind(&entry);
+ Add_d(scratch, scratch, Operand(-1));
+ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushStackHandler() {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ Push(Smi::zero()); // Padding.
+
+ // Link the current handler as the next handler.
+ li(t2,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Ld_d(t1, MemOperand(t2, 0));
+ Push(t1);
+
+ // Set this new handler as the current one.
+ St_d(sp, MemOperand(t2, 0));
+}
+
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(a1);
+ Add_d(sp, sp,
+ Operand(
+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ St_d(a1, MemOperand(scratch, 0));
+}
+
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ fsub_d(dst, src, kDoubleRegZero);
+}
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+  // here, which will cause scratch1 to become negative.
+ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ slli_d(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+void MacroAssembler::InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count,
+ Label* done, InvokeType type) {
+ Label regular_invoke;
+
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+
+ DCHECK_EQ(actual_parameter_count, a0);
+ DCHECK_EQ(expected_parameter_count, a2);
+
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub_d(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ mov(src, sp);
+ slli_d(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, actual_parameter_count);
+ bind(&copy);
+ Ld_d(t1, MemOperand(src, 0));
+ St_d(t1, MemOperand(dest, 0));
+ Sub_d(t0, t0, Operand(1));
+ Add_d(src, src, Operand(kSystemPointerSize));
+ Add_d(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ St_d(t0, MemOperand(a7, 0));
+ Sub_d(expected_parameter_count, expected_parameter_count, Operand(1));
+ Add_d(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+
+ bind(&regular_invoke);
+}
+
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ LoadReceiver(t0, actual_parameter_count);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
+
+ SmiTag(expected_parameter_count);
+ Push(expected_parameter_count);
+
+ SmiTag(actual_parameter_count);
+ Push(actual_parameter_count);
+
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ // TODO(LOONG_dev): MultiPush/Pop
+ Push(fun);
+ Push(fun);
+ Push(t0);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+
+ Pop(actual_parameter_count);
+ SmiUntag(actual_parameter_count);
+
+ Pop(expected_parameter_count);
+ SmiUntag(expected_parameter_count);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+ DCHECK_EQ(function, a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
+
+ // On function call, call into the debugger if necessary.
+ Label debug_hook, continue_after_hook;
+ {
+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ld_b(t0, MemOperand(t0, 0));
+ BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
+ }
+ bind(&continue_after_hook);
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, RootIndex::kUndefinedValue);
+ }
+
+ Label done;
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = kJavaScriptCallCodeStartRegister;
+ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
+ }
+
+ Branch(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&continue_after_hook);
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+void MacroAssembler::InvokeFunctionWithNewTarget(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+ Register expected_parameter_count = a2;
+ Register temp_reg = t0;
+ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // The argument count is stored as a uint16_t.
+ Ld_hu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ InvokeFunctionCode(a1, new_target, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+
+ // Get the function and setup the context.
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(a1, no_reg, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register object, Register map,
+ Register type_reg) {
+ LoadMap(map, object);
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Sub_d(range, type_reg, Operand(lower_limit));
+}
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
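+// AddOverflow_d/SubOverflow_d detect signed overflow with the classic xor
+// trick; roughly, in illustrative C (not emitted code):
+//   int64_t sum = left + right;
+//   bool add_overflowed = ((sum ^ left) & (sum ^ right)) < 0;
+//   int64_t dif = left - right;
+//   bool sub_overflowed = ((left ^ dif) & (left ^ right)) < 0;
+// The generated code leaves the corresponding sign-bit expression in
+// `overflow`, so callers test for overflow by branching on its sign.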
+void TurboAssembler::AddOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ add_d(scratch2, left, right_reg);
+ xor_(overflow, scratch2, left);
+ xor_(scratch, scratch2, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ add_d(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(scratch, dst, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::SubOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Sub_d(scratch2, left, right_reg);
+ xor_(overflow, left, scratch2);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ sub_d(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::MulOverflow_w(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Mul_w(scratch2, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ mov(dst, scratch2);
+ } else {
+ Mul_w(dst, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ }
+
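+  // The two instructions below set `overflow` to a non-zero value iff the
+  // high 32 bits of the 64-bit product differ from the sign extension of the
+  // low 32 bits, i.e. iff the product does not fit in an int32.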
+ srai_d(scratch2, dst, 32);
+ xor_(overflow, overflow, scratch2);
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. v0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference::Create(f));
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Call(code, RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference::Create(fid));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
+ PrepareCEntryFunction(builtin);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
+ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(kOffHeapTrampolineRegister);
+}
+
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Add_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Sub_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
+
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+ Operand rk) {
+ if (FLAG_debug_code) Check(cc, reason, rs, rk);
+}
+
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
+ Operand rk) {
+ Label L;
+ Branch(&L, cc, rj, rk);
+ Abort(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+void TurboAssembler::Abort(AbortReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
+ stop();
+ return;
+ }
+
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame()) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+void TurboAssembler::LoadMap(Register destination, Register object) {
+ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
+ LoadMap(dst, cp);
+ Ld_d(dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index)));
+}
+
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
+}
+
+void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+ Push(kScratchReg);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ addi_d(sp, fp, 2 * kPointerSize);
+ Ld_d(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld_d(fp, MemOperand(fp, 0 * kPointerSize));
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
+ // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  // [fp - 1] - StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
+
+ // Save registers and reserve room for saved entry sp.
+ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ St_d(ra, MemOperand(sp, 3 * kPointerSize));
+ St_d(fp, MemOperand(sp, 2 * kPointerSize));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
+ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ }
+ // Set up new frame pointer.
+ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
+
+ if (FLAG_debug_code) {
+ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Save the frame pointer and the context in top.
+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ St_d(fp, MemOperand(t8, 0));
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ St_d(cp, MemOperand(t8, 0));
+ }
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+    // The stack is already aligned to 0 modulo 8 for double stores (Fst_d).
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ int space = kNumOfSavedRegisters * kDoubleSize;
+ Sub_d(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fst_d(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve place for the return address, stack space and an optional slot
+ // (used by DirectCEntry to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ DCHECK_GE(stack_space, 0);
+ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addi_d(scratch, sp, kPointerSize);
+ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool do_return,
+ bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ Sub_d(t8, fp,
+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fld_d(reg, MemOperand(t8, i * kDoubleSize));
+ }
+ }
+
+ // Clear top frame.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
+ St_d(zero_reg, MemOperand(t8, 0));
+
+ // Restore current context from top and clear it in debug mode.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Ld_d(cp, MemOperand(t8, 0));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch = temp.Acquire();
+ li(scratch, Operand(Context::kInvalidContext));
+ St_d(scratch, MemOperand(t8, 0));
+ }
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+ if (argument_count.is_valid()) {
+ if (argument_count_is_length) {
+ add_d(sp, sp, argument_count);
+ } else {
+ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8);
+ }
+ }
+
+ addi_d(sp, sp, 2 * kPointerSize);
+ if (do_return) {
+ Ret();
+ }
+}
+
+int TurboAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_LOONG64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one LOONG64
+ // platform for another LOONG64 platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_LOONG64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (FLAG_debug_code) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+      // Don't use Check here, as it will call Runtime_Abort and re-enter here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ if (SmiValuesAre32Bits()) {
+ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset())));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Ld_w(dst, src);
+ SmiUntag(dst);
+ }
+}
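+
+// Layout note (illustrative): with 32-bit Smis the int32 payload lives in the
+// upper half of the 64-bit word (see SmiTag, which shifts left by 32), so on
+// little-endian LoongArch the Ld_w above reads 4 bytes at offset + 4 (via
+// SmiWordOffset) and yields the untagged, sign-extended value directly.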
+
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+void TurboAssembler::AssertNotSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void TurboAssembler::AssertSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertConstructor(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ LoadMap(t8, object);
+ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ Push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ Pop(object);
+ }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (!FLAG_debug_code) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
+
+ GetObjectType(object, t8, t8);
+
+ Label done;
+
+ // Check if JSGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
+
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
+ // Check if JSAsyncGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
+
+ bind(&done);
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
+ bind(&done_checking);
+ }
+}
+
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
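+
+// Usage sketch (illustrative only; assumes f0..f2 are available and a
+// TurboAssembler* tasm): the out_of_line label receives control when either
+// input is NaN, and the *OutOfLine helper computes src1 + src2, which yields
+// a quiet NaN in that case.
+//   Label ool, done;
+//   tasm->Float32Max(f0, f1, f2, &ool);
+//   tasm->Branch(&done);
+//   tasm->bind(&ool);
+//   tasm->Float32MaxOutOfLine(f0, f1, f2);
+//   tasm->bind(&done);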
+
+static const int kRegisterPassedArguments = 8;
+
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // Up to eight simple arguments are passed in registers a0..a7.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
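+
+// Worked example (illustrative): for 3 integer arguments and 3 double
+// arguments, num_reg_arguments becomes 3 + 2 * 3 = 9, so one word
+// (9 - kRegisterPassedArguments) spills to the stack.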
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+  // Up to eight simple arguments are passed in registers a0..a7; no
+  // argument slots are reserved.
+ // Remaining arguments are pushed on the stack.
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for the stack-passed
+    // arguments and the original value of sp.
+ mov(scratch, sp);
+ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
+ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, function);
+ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
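+
+// Pairing sketch (illustrative only; assumes a TurboAssembler* tasm and an
+// ExternalReference ref resolving to a C function that takes two word-sized
+// arguments):
+//   {
+//     UseScratchRegisterScope temps(tasm);
+//     tasm->PrepareCallCFunction(2, 0, temps.Acquire());
+//   }
+//   tasm->li(a0, Operand(1));
+//   tasm->li(a1, Operand(2));
+//   tasm->CallCFunction(ref, 2, 0);  // Also pops the space reserved above.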
+
+void TurboAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
+ DCHECK(has_frame());
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ Label alignment_as_expected;
+ {
+ Register scratch = t8;
+ And(scratch, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+      // Don't use Check here, as it will call Runtime_Abort, possibly
+      // re-entering here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_LOONG64
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != t7) {
+ mov(t7, function);
+ function = t7;
+ }
+
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+    // 't' registers are caller-saved, so they are safe to use as scratch
+    // registers here.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ pcaddi(pc_scratch, 1);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ St_d(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ St_d(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ St_d(pc_scratch, MemOperand(scratch, 0));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(fp, MemOperand(scratch, 0));
+ }
+
+ Call(function);
+
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ St_d(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+ }
+
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+#undef BRANCH_ARGS_CHECK
+
+void TurboAssembler::CheckPageFlag(const Register& object, int mask,
+ Condition cc, Label* condition_met) {
+ UseScratchRegisterScope temps(this);
+ temps.Include(t8);
+ Register scratch = temps.Acquire();
+ And(scratch, object, Operand(~kPageAlignmentMask));
+ Ld_d(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
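+
+// Note (illustrative): And-ing the object address with ~kPageAlignmentMask
+// above clears the low bits and yields the start of its BasicMemoryChunk;
+// the chunk's flags word at kFlagsOffset is then tested against `mask`.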
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // TODO(LOONG_dev): range check, add Pcadd macro function?
+ pcaddi(dst, -pc_offset() >> 2);
+}
+
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ld_d(t7,
+ MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
+ Call(t7);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Branch(ret);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ }
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, out;
+ Register scratch = t8;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+    // Not an off-heap trampoline object; the entry point is at
+    // Code::raw_instruction_start().
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+    // An off-heap trampoline; the entry point is loaded from the builtin
+    // entry table.
+ bind(&if_code_is_off_heap);
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ // TODO(liuyu): don't use scratch_reg in Alsl_d;
+ Alsl_d(destination, scratch, kRootRegister, kSystemPointerSizeLog2,
+ zero_reg);
+ Ld_d(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/chromium/v8/src/codegen/loong64/macro-assembler-loong64.h b/chromium/v8/src/codegen/loong64/macro-assembler-loong64.h
new file mode 100644
index 00000000000..c34a5bc18e4
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -0,0 +1,1071 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
+#ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+enum class AbortReason : uint8_t;
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+  // If the constant value can be represented in just 12 bits, then
+  // optimize the li to use a single instruction, rather than the
+  // lu12i_w/lu32i_d/lu52i_d/ori sequence. A number of other optimizations
+  // that emit fewer than the maximum number of instructions also exist.
+ OPTIMIZE_SIZE = 0,
+ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence),
+ // even if the constant could be loaded with just one, so that this value is
+ // patchable later.
+ CONSTANT_SIZE = 1,
+  // For address loads only 3 instructions are required. Used to mark a
+  // constant load that will be used as an address without relocation
+  // information. It ensures predictable code size, so specific sites
+  // in the code are patchable.
+ ADDRESS_LOAD = 2
+};
+
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
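+
+// Worked example (illustrative): for a field at byte offset off within a heap
+// object, this returns MemOperand(object, off - kHeapObjectTag), compensating
+// for the tag bit set in heap object pointers.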
+
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+ public:
+ using TurboAssemblerBase::TurboAssemblerBase;
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on loong64.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ void AllocateStackSpace(Register bytes) { Sub_d(sp, sp, bytes); }
+
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Sub_d(sp, sp, Operand(bytes));
+ }
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
+
+ void InitializeRootRegister() {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
+ }
+
+  // Jump unconditionally to the given label.
+  // Rather, use b(Label) for code generation.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ void Trap();
+ void DebugBreak();
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug-code to enable.
+ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Print a message to stdout and abort execution.
+ void Abort(AbortReason msg);
+
+ void Branch(Label* label, bool need_link = false);
+ void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void Branch(Label* L, Condition cond, Register rj, RootIndex index);
+
+ // Floating point branches
+ void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, true);
+ }
+
+ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, true);
+ }
+
+ void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, false);
+ }
+
+ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, false);
+ }
+
+ void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
+
+ void BranchTrueF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseF(Label* target, CFRegister cc = FCC0);
+
+ static int InstrCountForLi64Bit(int64_t value);
+ inline void LiLower32BitHelper(Register rd, Operand j);
+ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(static_cast<int64_t>(j)), mode);
+ }
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
+
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
+
+ inline void Move(Register output, MemOperand operand) {
+ Ld_d(output, operand);
+ }
+
+ inline void GenPCRelativeJump(Register rd, int64_t offset);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t offset);
+
+// Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS \
+ Condition cond = al, Register rj = zero_reg, \
+ const Operand &rk = Operand(zero_reg)
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  // Differs from li: this method saves the target to memory and then loads
+  // it into a register using ld_d; it can be used in the wasm jump table for
+  // concurrent patching.
+ void PatchAndJump(Address target);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(const ExternalReference& reference);
+ void Call(Register target, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ COND_ARGS);
+ void Call(Label* target);
+
+  // Load the builtin given by the Smi in |builtin| into the same
+  // register.
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
+
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label* jump_deoptimization_entry_label);
+
+ void Ret(COND_ARGS);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void Ld_d(Register rd, const MemOperand& rj);
+ void St_d(Register rd, const MemOperand& rj);
+
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi smi);
+
+ void Push(Register src) {
+ Add_d(sp, sp, Operand(-kPointerSize));
+ St_d(src, MemOperand(sp, 0));
+ }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Sub_d(sp, sp, Operand(2 * kPointerSize));
+ St_d(src1, MemOperand(sp, 1 * kPointerSize));
+ St_d(src2, MemOperand(sp, 0 * kPointerSize));
+ }
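+
+  // Layout example (illustrative): after Push(a0, a1), a0 is at
+  // MemOperand(sp, kPointerSize) (the higher address) and a1 is at
+  // MemOperand(sp, 0), matching the "leftmost first" convention above.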
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Sub_d(sp, sp, Operand(3 * kPointerSize));
+ St_d(src1, MemOperand(sp, 2 * kPointerSize));
+ St_d(src2, MemOperand(sp, 1 * kPointerSize));
+ St_d(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Sub_d(sp, sp, Operand(4 * kPointerSize));
+ St_d(src1, MemOperand(sp, 3 * kPointerSize));
+ St_d(src2, MemOperand(sp, 2 * kPointerSize));
+ St_d(src3, MemOperand(sp, 1 * kPointerSize));
+ St_d(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Sub_d(sp, sp, Operand(5 * kPointerSize));
+ St_d(src1, MemOperand(sp, 4 * kPointerSize));
+ St_d(src2, MemOperand(sp, 3 * kPointerSize));
+ St_d(src3, MemOperand(sp, 2 * kPointerSize));
+ St_d(src4, MemOperand(sp, 1 * kPointerSize));
+ St_d(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ enum PushArrayOrder { kNormal, kReverse };
+ void PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order = kNormal);
+
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
+
+ void CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode);
+
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
+ // For a given |object| and |offset|:
+ // - Move |object| to |dst_object|.
+ // - Compute the address of the slot pointed to by |offset| in |object| and
+ // write it to |dst_slot|.
+ // This method makes sure |object| and |offset| are allowed to overlap with
+ // the destination registers.
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset);
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPush(RegList regs1, RegList regs2);
+ void MultiPush(RegList regs1, RegList regs2, RegList regs3);
+ void MultiPushFPU(RegList regs);
+
+  // Calculate how much stack space (in bytes) is required to store caller
+  // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller saved registers on the stack, and return the number of bytes
+  // the stack pointer is adjusted by.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ // Restore caller saved registers from the stack, and return the number of
+  // bytes the stack pointer is adjusted by.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
+ void Pop(Register dst) {
+ Ld_d(dst, MemOperand(sp, 0));
+ Add_d(sp, sp, Operand(kPointerSize));
+ }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(src1 != src2);
+ Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
+ Add_d(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
+ Add_d(sp, sp, 3 * kPointerSize);
+ }
+
+  // Pops multiple values from the stack and loads them into the
+  // registers specified in regs. Pop order is the opposite of MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPop(RegList regs1, RegList regs2);
+ void MultiPop(RegList regs1, RegList regs2, RegList regs3);
+
+ void MultiPopFPU(RegList regs);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rj, const Operand& rk); \
+ void instr(Register rd, Register rj, Register rk) { \
+ instr(rd, rj, Operand(rk)); \
+ } \
+ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rj, const Operand& rk); \
+ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \
+ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); }
+
+ DEFINE_INSTRUCTION(Add_w)
+ DEFINE_INSTRUCTION(Add_d)
+ DEFINE_INSTRUCTION(Div_w)
+ DEFINE_INSTRUCTION(Div_wu)
+ DEFINE_INSTRUCTION(Div_du)
+ DEFINE_INSTRUCTION(Mod_w)
+ DEFINE_INSTRUCTION(Mod_wu)
+ DEFINE_INSTRUCTION(Div_d)
+ DEFINE_INSTRUCTION(Sub_w)
+ DEFINE_INSTRUCTION(Sub_d)
+ DEFINE_INSTRUCTION(Mod_d)
+ DEFINE_INSTRUCTION(Mod_du)
+ DEFINE_INSTRUCTION(Mul_w)
+ DEFINE_INSTRUCTION(Mulh_w)
+ DEFINE_INSTRUCTION(Mulh_wu)
+ DEFINE_INSTRUCTION(Mul_d)
+ DEFINE_INSTRUCTION(Mulh_d)
+ DEFINE_INSTRUCTION2(Div_w)
+ DEFINE_INSTRUCTION2(Div_d)
+ DEFINE_INSTRUCTION2(Div_wu)
+ DEFINE_INSTRUCTION2(Div_du)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+ DEFINE_INSTRUCTION(Andn)
+ DEFINE_INSTRUCTION(Orn)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Slti)
+ DEFINE_INSTRUCTION(Sltiu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
+
+ DEFINE_INSTRUCTION(Rotr_w)
+ DEFINE_INSTRUCTION(Rotr_d)
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+
+ void SmiUntag(Register dst, const MemOperand& src);
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ srai_d(dst, src, kSmiShift);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ srai_w(dst, src, kSmiShift);
+ }
+ }
+
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+ // On LoongArch64, we should sign-extend 32-bit values.
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored on the
+  // stack after the argument slots, using the helper CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ // See comments at the beginning of Builtins::Generate_CEntry.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, ref);
+ }
+
+ void CheckPageFlag(const Register& object, int mask, Condition cc,
+ Label* condition_met);
+#undef COND_ARGS
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
+ DoubleRegister double_input, StubCallMode stub_mode);
+
+ // Conditional move.
+ void Movz(Register rd, Register rj, Register rk);
+ void Movn(Register rd, Register rj, Register rk);
+
+ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0);
+ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0);
+
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk,
+ Condition cond);
+
+ void Clz_w(Register rd, Register rj);
+ void Clz_d(Register rd, Register rj);
+ void Ctz_w(Register rd, Register rj);
+ void Ctz_d(Register rd, Register rj);
+ void Popcnt_w(Register rd, Register rj);
+ void Popcnt_d(Register rd, Register rj);
+
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false);
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+ void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lswb);
+ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Neg_s(FPURegister fd, FPURegister fj);
+ void Neg_d(FPURegister fd, FPURegister fk);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
+ void Ld_b(Register rd, const MemOperand& rj);
+ void Ld_bu(Register rd, const MemOperand& rj);
+ void St_b(Register rd, const MemOperand& rj);
+
+ void Ld_h(Register rd, const MemOperand& rj);
+ void Ld_hu(Register rd, const MemOperand& rj);
+ void St_h(Register rd, const MemOperand& rj);
+
+ void Ld_w(Register rd, const MemOperand& rj);
+ void Ld_wu(Register rd, const MemOperand& rj);
+ void St_w(Register rd, const MemOperand& rj);
+
+ void Fld_s(FPURegister fd, const MemOperand& src);
+ void Fst_s(FPURegister fj, const MemOperand& dst);
+
+ void Fld_d(FPURegister fd, const MemOperand& src);
+ void Fst_d(FPURegister fj, const MemOperand& dst);
+
+ void Ll_w(Register rd, const MemOperand& rj);
+ void Sc_w(Register rd, const MemOperand& rj);
+
+ void Ll_d(Register rd, const MemOperand& rj);
+ void Sc_d(Register rd, const MemOperand& rj);
+
+  // If src1 == src2, the result is simply moved from src1. It is permitted
+  // for the result to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
+ void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); }
+
+ inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
+
+ inline void Move(Register dst, Register src) {
+ if (dst != src) {
+ mov(dst, src);
+ }
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ movfr2gr_s(dst_low, src);
+ }
+
+ void FmoveLow(FPURegister dst, Register src_low);
+
+ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
+ inline void Move_d(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_d(dst, src);
+ }
+ }
+
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_s(dst, src);
+ }
+ }
+
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
+
+  // AddOverflow_d sets the overflow register to a negative value if
+  // overflow occurred, otherwise it is zero or positive.
+ void AddOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+  // SubOverflow_d sets the overflow register to a negative value if
+  // overflow occurred, otherwise it is zero or positive.
+ void SubOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+  // MulOverflow_w sets the overflow register to zero if no overflow occurred.
+ void MulOverflow_w(Register dst, Register left, const Operand& right,
+ Register overflow);
+
+ // TODO(LOONG_dev): LOONG64 Remove this constant
+ // Number of instructions needed for calculation of switch table entry address
+ static const int kSwitchTablePrologueSize = 5;
+
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with a 'Label* func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, RootIndex index) final;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ void LoadMap(Register destination, Register object);
+
+  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Ffint_d_uw(FPURegister fd, FPURegister fj);
+ void Ffint_d_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to double.
+ void Ffint_d_ul(FPURegister fd, FPURegister fj);
+ void Ffint_d_ul(FPURegister fd, Register rj);
+
+ // Convert unsigned word to float.
+ void Ffint_s_uw(FPURegister fd, FPURegister fj);
+ void Ffint_s_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to float.
+ void Ffint_s_ul(FPURegister fd, FPURegister fj);
+ void Ffint_s_ul(FPURegister fd, Register rj);
+
+ // Convert double to unsigned word.
+ void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Convert single to unsigned word.
+ void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch);
+
+ // Convert double to unsigned long.
+ void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Round double functions
+ void Trunc_d(FPURegister fd, FPURegister fj);
+ void Round_d(FPURegister fd, FPURegister fj);
+ void Floor_d(FPURegister fd, FPURegister fj);
+ void Ceil_d(FPURegister fd, FPURegister fj);
+
+ // Round float functions
+ void Trunc_s(FPURegister fd, FPURegister fj);
+ void Round_s(FPURegister fd, FPURegister fj);
+ void Floor_s(FPURegister fd, FPURegister fj);
+ void Ceil_s(FPURegister fd, FPURegister fj);
+
+  // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label);
+
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, eq, a, Operand(scratch));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, lt, a, Operand(scratch));
+ }
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+  // in the range [1, 31] (inclusive). The scratch register may be clobbered.
+ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
+ protected:
+ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+ inline int32_t GetOffset(Label* L, OffsetSize bits);
+
+ private:
+ bool has_double_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link);
+
+ // f32 or f64
+ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd, bool f32 = true);
+
+ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+ bool f32 = true);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
+ public:
+ using TurboAssembler::TurboAssembler;
+
+ // It assumes that the arguments are located below the stack pointer.
+ // argc is the number of arguments not including the receiver.
+ // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
+ // reversed arguments order.
+ void LoadReceiver(Register dest, Register argc) {
+ Ld_d(dest, MemOperand(sp, 0));
+ }
+
+ void StoreReceiver(Register rec, Register argc, Register scratch) {
+ St_d(rec, MemOperand(sp, 0));
+ }
+
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+ void PushRoot(RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Push(scratch);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_equal, eq, with, Operand(scratch));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_not_equal, ne, with, Operand(scratch));
+ }
+
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // For a given |object| notify the garbage collector that the slot at |offset|
+ // has been written. |value| is the object being stored.
+ void RecordWrite(
+ Register object, Operand offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
+
+ // Convert double to unsigned long.
+ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch);
+
+ void Ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_l_d(FPURegister fd, FPURegister fj);
+
+ void Ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_w_d(FPURegister fd, FPURegister fj);
+
+ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
+
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
+
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
+ }
+
+ void LoadNativeContextSlot(Register dst, int index);
+
+ // Load the initial map from the global function. The registers
+  // function and map can be the same; function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunctionWithNewTarget(Register function, Register new_target,
+ Register actual_parameter_count,
+ InvokeType type);
+ void InvokeFunction(Register function, Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // Exception handling.
+
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
+
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
+
+ // -------------------------------------------------------------------------
+ // Support functions.
+
+ void GetObjectType(Register function, Register map, Register type_reg);
+
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
+
+ // Jump to the builtin routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
+
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
+
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ slli_d(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ add_w(dst, src, src);
+ }
+ }
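+
+  // Worked example (illustrative): with 32-bit Smis, SmiTag turns the value 5
+  // into 5 << 32; with 31-bit Smis, add_w(dst, src, src) computes 2 * src,
+  // i.e. the value shifted left by one tag bit.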
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
+ // Left-shifted from int32 equivalent of Smi.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is upper 32-bits of 64-bit word.
+ srai_d(dst, src, kSmiShift - scale);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_GE(scale, kSmiTagSize);
+ slli_w(dst, src, scale - kSmiTagSize);
+ }
+ }
+
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+
+ // Jump if the register contains a non-smi.
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
+
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
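+
+  // Worked example (illustrative): for a Field with kShift == 3 and
+  // kSize == 4, this emits Bstrpick_d(dst, src, 6, 3), extracting bits
+  // [6:3] of src into the low bits of dst.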
+
+ private:
+ // Helper functions for generating invokes.
+ void InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count, Label* done,
+ InvokeType type);
+
+ friend class CommonFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
+};
+
+template <typename Func>
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ UseScratchRegisterScope scope(this);
+ Register scratch = scope.Acquire();
+ BlockTrampolinePoolFor((3 + case_count) * kInstrSize);
+
+ pcaddi(scratch, 3);
+ alsl_d(scratch, index, scratch, kInstrSizeLog2);
+ jirl(zero_reg, scratch, 0);
+ for (size_t index = 0; index < case_count; ++index) {
+ b(GetLabelFunction(index));
+ }
+}
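+
+// Usage sketch (illustrative only; assumes a TurboAssembler* tasm and a
+// Register `index` holding the zero-based case number):
+//   Label case_0, case_1, case_2;
+//   Label* labels[] = {&case_0, &case_1, &case_2};
+//   tasm->GenerateSwitchTable(index, 3,
+//                             [&labels](size_t i) { return labels[i]; });
+//   // ... later, bind each of case_0..case_2 at its handler.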
+
+#define ACCESS_MASM(masm) masm->
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
diff --git a/chromium/v8/src/codegen/loong64/register-loong64.h b/chromium/v8/src/codegen/loong64/register-loong64.h
new file mode 100644
index 00000000000..7d9d88c1f0f
--- /dev/null
+++ b/chromium/v8/src/codegen/loong64/register-loong64.h
@@ -0,0 +1,288 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(ra) V(tp) V(sp) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \
+ V(x_reg) V(fp) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7) V(s8)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \
+ V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23)
+// clang-format on
+
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // a4
+ 1 << 9 | // a5
+ 1 << 10 | // a6
+ 1 << 11 | // a7
+ 1 << 12 | // t0
+ 1 << 13 | // t1
+ 1 << 14 | // t2
+ 1 << 15 | // t3
+ 1 << 16 | // t4
+ 1 << 17 | // t5
+ 1 << 20; // t8
+
+const int kNumJSCallerSaved = 15;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 22 | // fp
+ 1 << 23 | // s0
+ 1 << 24 | // s1
+ 1 << 25 | // s2
+ 1 << 26 | // s3
+ 1 << 27 | // s4
+ 1 << 28 | // s5
+ 1 << 29 | // s6 (roots in JavaScript code)
+ 1 << 30 | // s7 (cp in JavaScript code)
+ 1 << 31; // s8
+
+const int kNumCalleeSaved = 10;
+
+const RegList kCalleeSavedFPU = 1 << 24 | // f24
+ 1 << 25 | // f25
+ 1 << 26 | // f26
+ 1 << 27 | // f27
+ 1 << 28 | // f28
+ 1 << 29 | // f29
+ 1 << 30 | // f30
+ 1 << 31; // f31
+
+const int kNumCalleeSavedFPU = 8;
+
+const RegList kCallerSavedFPU = 1 << 0 | // f0
+ 1 << 1 | // f1
+ 1 << 2 | // f2
+ 1 << 3 | // f3
+ 1 << 4 | // f4
+ 1 << 5 | // f5
+ 1 << 6 | // f6
+ 1 << 7 | // f7
+ 1 << 8 | // f8
+ 1 << 9 | // f9
+ 1 << 10 | // f10
+ 1 << 11 | // f11
+ 1 << 12 | // f12
+ 1 << 13 | // f13
+ 1 << 14 | // f14
+ 1 << 15 | // f15
+ 1 << 16 | // f16
+ 1 << 17 | // f17
+ 1 << 18 | // f18
+ 1 << 19 | // f19
+ 1 << 20 | // f20
+ 1 << 21 | // f21
+ 1 << 22 | // f22
+ 1 << 23; // f23
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code(kRegCode_##R);
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Floating-point (FPU) register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ FPURegister low() const { return FPURegister::from_code(code()); }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+// Condition Flag Register
+enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+using FloatRegister = FPURegister;
+
+using DoubleRegister = FPURegister;
+
+using Simd128Register = FPURegister;
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
+constexpr DoubleRegister kScratchDoubleReg1 = f30;
+constexpr DoubleRegister kScratchDoubleReg2 = f31;
+// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
+constexpr DoubleRegister kDoubleRegZero = f29;
+
+struct FPUControlRegister {
+ bool is_valid() const { return (reg_code >> 2) == 0; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
+constexpr FPUControlRegister FCSR = {kFCSRRegister};
+constexpr FPUControlRegister FCSR0 = {kFCSRRegister};
+constexpr FPUControlRegister FCSR1 = {kFCSRRegister + 1};
+constexpr FPUControlRegister FCSR2 = {kFCSRRegister + 2};
+constexpr FPUControlRegister FCSR3 = {kFCSRRegister + 3};
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = a0;
+constexpr Register kReturnRegister1 = a1;
+constexpr Register kReturnRegister2 = a2;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kInterpreterAccumulatorRegister = a0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = t7;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+constexpr DoubleRegister kFPReturnRegister0 = f0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
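A brief note on the RegList constants in this file: assuming RegList is an integer bit mask indexed by register code (as the per-bit comments above suggest, with codes following GENERAL_REGISTERS order: zero_reg = 0, ra = 1, ..., a0 = 4), membership can be tested with a plain mask. Sketch only:

    // a0 has code 4, so bit 4 of kJSCallerSaved is set.
    bool a0_is_js_caller_saved = (kJSCallerSaved & (1 << a0.code())) != 0;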
diff --git a/chromium/v8/src/codegen/macro-assembler.h b/chromium/v8/src/codegen/macro-assembler.h
index cfa7a4d3416..448807b20ec 100644
--- a/chromium/v8/src/codegen/macro-assembler.h
+++ b/chromium/v8/src/codegen/macro-assembler.h
@@ -57,6 +57,9 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
@@ -91,13 +94,13 @@ class V8_NODISCARD FrameScope {
type_(type),
old_has_frame_(tasm->has_frame()) {
tasm->set_has_frame(true);
- if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm->EnterFrame(type);
}
}
~FrameScope() {
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm_->LeaveFrame(type_);
}
tasm_->set_has_frame(old_has_frame_);
@@ -107,8 +110,8 @@ class V8_NODISCARD FrameScope {
#ifdef V8_CODE_COMMENTS
const char* frame_name(StackFrame::Type type) {
switch (type) {
- case StackFrame::NONE:
- return "Frame: NONE";
+ case StackFrame::NO_FRAME_TYPE:
+ return "Frame: NO_FRAME_TYPE";
case StackFrame::MANUAL:
return "Frame: MANUAL";
#define FRAME_TYPE_CASE(type, field) \
@@ -142,7 +145,7 @@ class V8_NODISCARD FrameAndConstantPoolScope {
if (FLAG_enable_embedded_constant_pool) {
masm->set_constant_pool_available(true);
}
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
masm->EnterFrame(type, !old_constant_pool_available_);
}
}
@@ -191,7 +194,7 @@ class V8_NODISCARD ConstantPoolUnavailableScope {
class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
public:
explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
- : FrameScope(masm, StackFrame::NONE) {}
+ : FrameScope(masm, StackFrame::NO_FRAME_TYPE) {}
};
// Prevent the use of the RootArray during the lifetime of this
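Across this patch StackFrame::NONE is replaced by StackFrame::NO_FRAME_TYPE; as the guards above show, the scopes still skip EnterFrame/LeaveFrame for that type (and for MANUAL). A minimal usage sketch under the new name:

    {
      // No frame is entered or left for NO_FRAME_TYPE.
      FrameScope scope(masm, StackFrame::NO_FRAME_TYPE);
      // ... emit code that must not set up its own frame ...
    }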
diff --git a/chromium/v8/src/codegen/mips/assembler-mips-inl.h b/chromium/v8/src/codegen/mips/assembler-mips-inl.h
index d00da6efbac..ea983668a48 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/codegen/mips/assembler-mips-inl.h
@@ -166,7 +166,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.cc b/chromium/v8/src/codegen/mips/assembler-mips.cc
index 0d5a8710e5a..dde08710fbe 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/assembler-mips.cc
@@ -878,7 +878,6 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
}
- return 0;
}
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
index 9c1af1cb056..e7bb08ef26c 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1398,8 +1398,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int32_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3279,7 +3278,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3631,7 +3629,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -3978,7 +3975,7 @@ void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -4400,8 +4397,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -4421,8 +4418,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(t0, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -4748,7 +4745,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
li(a0, Operand(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -4761,7 +4758,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -4987,15 +4984,19 @@ void MacroAssembler::AssertStackIsAligned() {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
@@ -5519,16 +5520,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Lw(t9,
- MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
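Since JumpIfSmi and JumpIfNotSmi now acquire their scratch register through UseScratchRegisterScope, call sites simply drop the scratch argument. Sketch of the call-site change (names illustrative):

    // Before: __ JumpIfSmi(value, &is_smi, scratch);
    __ JumpIfSmi(value, &is_smi);  // scratch is acquired internally on demand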
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.h b/chromium/v8/src/codegen/mips/macro-assembler-mips.h
index ffa5f5820d2..ba1e94ac92c 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.h
@@ -460,6 +460,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
+ void SmiToInt32(Register smi) { SmiUntag(smi); }
+
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -795,7 +797,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
+ BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
@@ -817,8 +819,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1108,7 +1108,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/chromium/v8/src/codegen/mips/register-mips.h b/chromium/v8/src/codegen/mips/register-mips.h
index 95164a86c1c..7fd259bf9ba 100644
--- a/chromium/v8/src/codegen/mips/register-mips.h
+++ b/chromium/v8/src/codegen/mips/register-mips.h
@@ -362,7 +362,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h b/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h
index 2924b661f21..aaced78154b 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -145,7 +145,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 708cf4baa67..28fd588a7c2 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1918,8 +1918,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3922,7 +3921,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -4274,7 +4272,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -4468,7 +4465,7 @@ void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -4924,8 +4921,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -4946,8 +4943,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(t0, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -5279,7 +5276,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
li(a0, Operand(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -5292,7 +5289,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -5532,20 +5529,24 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
@@ -5555,7 +5556,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
@@ -6059,16 +6060,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t9,
- MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
index a4991bcb1e6..a42fe1a6d03 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -485,6 +485,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ // On MIPS64, we should sign-extend 32-bit values.
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -805,7 +818,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode);
// Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
+ void JumpIfSmi(Register value, Label* smi_label,
BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
@@ -836,8 +849,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1182,13 +1193,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/chromium/v8/src/codegen/mips64/register-mips64.h b/chromium/v8/src/codegen/mips64/register-mips64.h
index 51b03aba1fa..1fbe3ec7ac2 100644
--- a/chromium/v8/src/codegen/mips64/register-mips64.h
+++ b/chromium/v8/src/codegen/mips64/register-mips64.h
@@ -373,7 +373,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.cc b/chromium/v8/src/codegen/optimized-compilation-info.cc
index e3ca07a3c9d..d0c4ed52e64 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.cc
+++ b/chromium/v8/src/codegen/optimized-compilation-info.cc
@@ -63,34 +63,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
ConfigureFlags();
}
-#ifdef DEBUG
-bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- return untrusted_code_mitigations();
- default:
- return true;
- }
- UNREACHABLE();
-}
-
-bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- if (!GetFlag(kPoisonRegisterArguments)) return true;
- return untrusted_code_mitigations() && called_with_code_start_register();
- default:
- return true;
- }
- UNREACHABLE();
-}
-#endif // DEBUG
-
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
- if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+ if (IsTurboprop() || FLAG_concurrent_inlining) {
set_concurrent_inlining();
}
@@ -104,7 +80,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::TURBOPROP:
set_called_with_code_start_register();
set_switch_jump_table();
- if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;
@@ -123,8 +98,15 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_CAPI_FUNCTION:
set_switch_jump_table();
break;
- default:
+ case CodeKind::C_WASM_ENTRY:
+ case CodeKind::JS_TO_JS_FUNCTION:
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ case CodeKind::WASM_TO_JS_FUNCTION:
break;
+ case CodeKind::BASELINE:
+ case CodeKind::INTERPRETED_FUNCTION:
+ case CodeKind::REGEXP:
+ UNREACHABLE();
}
}
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.h b/chromium/v8/src/codegen/optimized-compilation-info.h
index b7ed0d29c4f..d92964c7961 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.h
+++ b/chromium/v8/src/codegen/optimized-compilation-info.h
@@ -58,21 +58,19 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
- V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
- V(SwitchJumpTable, switch_jump_table, 8) \
- V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
- V(PoisonRegisterArguments, poison_register_arguments, 10) \
- V(AllocationFolding, allocation_folding, 11) \
- V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
- V(TraceTurboJson, trace_turbo_json, 13) \
- V(TraceTurboGraph, trace_turbo_graph, 14) \
- V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
- V(TraceTurboAllocation, trace_turbo_allocation, 16) \
- V(TraceHeapBroker, trace_heap_broker, 17) \
- V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
- V(ConcurrentInlining, concurrent_inlining, 19) \
- V(DiscardResultForTesting, discard_result_for_testing, 20) \
- V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
+ V(SwitchJumpTable, switch_jump_table, 7) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 8) \
+ V(AllocationFolding, allocation_folding, 9) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 10) \
+ V(TraceTurboJson, trace_turbo_json, 11) \
+ V(TraceTurboGraph, trace_turbo_graph, 12) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 13) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 14) \
+ V(TraceHeapBroker, trace_heap_broker, 15) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
+ V(ConcurrentInlining, concurrent_inlining, 17) \
+ V(DiscardResultForTesting, discard_result_for_testing, 18) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@@ -82,7 +80,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_GETTER(Camel, Lower, Bit) \
bool Lower() const { \
- DCHECK(FlagGetIsValid(k##Camel)); \
return GetFlag(k##Camel); \
}
FLAGS(DEF_GETTER)
@@ -90,17 +87,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_SETTER(Camel, Lower, Bit) \
void set_##Lower() { \
- DCHECK(FlagSetIsValid(k##Camel)); \
SetFlag(k##Camel); \
}
FLAGS(DEF_SETTER)
#undef DEF_SETTER
-#ifdef DEBUG
- bool FlagGetIsValid(Flag flag) const;
- bool FlagSetIsValid(Flag flag) const;
-#endif // DEBUG
-
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
@@ -141,13 +132,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
compiler::NodeObserver* node_observer() const { return node_observer_; }
- void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
- poisoning_level_ = poisoning_level;
- }
- PoisoningMitigationLevel GetPoisoningMitigationLevel() const {
- return poisoning_level_;
- }
-
// Code getters and setters.
void SetCode(Handle<Code> code);
@@ -269,8 +253,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Compilation flags.
unsigned flags_ = 0;
- PoisoningMitigationLevel poisoning_level_ =
- PoisoningMitigationLevel::kDontPoison;
const CodeKind code_kind_;
Builtin builtin_ = Builtin::kNoBuiltinId;
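With the poison-register entries gone, the FLAGS X-macro keeps the same shape: each entry expands into a bit constant plus a trivial getter/setter over flags_. Roughly, for one entry (a sketch of the expansion, not literal generated code):

    // From V(LoopPeeling, loop_peeling, 6):
    //   kLoopPeeling = 1 << 6                                    // DEF_ENUM
    bool loop_peeling() const { return GetFlag(kLoopPeeling); }   // DEF_GETTER
    void set_loop_peeling() { SetFlag(kLoopPeeling); }            // DEF_SETTER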
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h b/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h
index d8cd524451b..a4917192d8f 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -159,10 +159,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.cc b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
index 2c568b3f3f7..3e154e4c297 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
@@ -187,13 +187,13 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset)
+MemOperand::MemOperand(Register rn, int64_t offset)
: ra_(rn), offset_(offset), rb_(no_reg) {}
MemOperand::MemOperand(Register ra, Register rb)
: ra_(ra), offset_(0), rb_(rb) {}
-MemOperand::MemOperand(Register ra, Register rb, int32_t offset)
+MemOperand::MemOperand(Register ra, Register rb, int64_t offset)
: ra_(ra), offset_(offset), rb_(rb) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -303,7 +303,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
bool Assembler::IsLis(Instr instr) {
@@ -1621,8 +1620,8 @@ void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
}
void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frc, RCBit rc) {
- emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+ const DoubleRegister frb, RCBit rc) {
+ emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
rc);
}
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.h b/chromium/v8/src/codegen/ppc/assembler-ppc.h
index f46090cec5c..2b5c156204c 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.h
@@ -133,13 +133,13 @@ class V8_EXPORT_PRIVATE Operand {
// Alternatively we can have a 16bit signed value immediate
class V8_EXPORT_PRIVATE MemOperand {
public:
- explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int64_t offset = 0);
explicit MemOperand(Register ra, Register rb);
- explicit MemOperand(Register ra, Register rb, int32_t offset);
+ explicit MemOperand(Register ra, Register rb, int64_t offset);
- int32_t offset() const { return offset_; }
+ int64_t offset() const { return offset_; }
// PowerPC - base register
Register ra() const { return ra_; }
@@ -148,7 +148,7 @@ class V8_EXPORT_PRIVATE MemOperand {
private:
Register ra_; // base
- int32_t offset_; // offset
+ int64_t offset_; // offset
Register rb_; // index
friend class Assembler;
@@ -373,6 +373,11 @@ class Assembler : public AssemblerBase {
x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
}
+#define DECLARE_PPC_X_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src) { \
+ x_form(instr_name, src, dst, r0, LeaveRC); \
+ }
+
#define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const MemOperand& src) { \
x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
@@ -411,6 +416,7 @@ class Assembler : public AssemblerBase {
PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
+ PPC_X_OPCODE_G_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_G_FORM)
PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
@@ -442,26 +448,40 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_G_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
-#define DECLARE_PPC_XX2_INSTRUCTIONS(name, instr_name, instr_value) \
- inline void name(const Simd128Register rt, const Simd128Register rb) { \
- xx2_form(instr_name, rt, rb); \
+#define DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register rb) { \
+ xx2_form(instr_name, rt, rb); \
+ }
+#define DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister rb) { \
+ xx2_form(instr_name, rt, rb); \
}
- inline void xx2_form(Instr instr, Simd128Register t, Simd128Register b) {
- // Using VR (high VSR) registers.
- int BX = 1;
- int TX = 1;
+ template <typename T>
+ inline void xx2_form(Instr instr, T t, T b) {
+ static_assert(std::is_same<T, Simd128Register>::value ||
+ std::is_same<T, DoubleRegister>::value,
+ "VSX only uses FP or Vector registers.");
+ // Using FP (low VSR) registers.
+ int BX = 0, TX = 0;
+ // Using VR (high VSR) registers when Simd registers are used.
+ if (std::is_same<T, Simd128Register>::value) {
+ BX = TX = 1;
+ }
emit(instr | (t.code() & 0x1F) * B21 | (b.code() & 0x1F) * B11 | BX * B1 |
TX);
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
- PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
-#undef DECLARE_PPC_XX2_INSTRUCTIONS
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+#undef DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS
+#undef DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS
#define DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register ra, \
diff --git a/chromium/v8/src/codegen/ppc/constants-ppc.h b/chromium/v8/src/codegen/ppc/constants-ppc.h
index e7f1ff311d3..4f17f08969a 100644
--- a/chromium/v8/src/codegen/ppc/constants-ppc.h
+++ b/chromium/v8/src/codegen/ppc/constants-ppc.h
@@ -64,7 +64,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
// Used to encode a boolean value when emitting 32 bit
// opcodes which will indicate the presence of function descriptors
-constexpr int kHasFunctionDescriptorBitShift = 9;
+constexpr int kHasFunctionDescriptorBitShift = 4;
constexpr int kHasFunctionDescriptorBitMask = 1
<< kHasFunctionDescriptorBitShift;
@@ -364,7 +364,7 @@ using Instr = uint32_t;
/* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
/* VSX Vector Absolute Value Double-Precision */ \
V(xvabsdp, XVABSDP, 0xF0000764) \
/* VSX Vector Negate Double-Precision */ \
@@ -423,6 +423,14 @@ using Instr = uint32_t;
/* Saturate */ \
V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
+#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
+ /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvspdpn, XSCVSPDPN, 0xF000052C)
+
#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
/* Vector Byte-Reverse Quadword */ \
V(xxbrq, XXBRQ, 0xF01F076C)
@@ -440,9 +448,6 @@ using Instr = uint32_t;
V(xsabsdp, XSABSDP, 0xF0000564) \
/* VSX Scalar Convert Double-Precision to Single-Precision */ \
V(xscvdpsp, XSCVDPSP, 0xF0000424) \
- /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
- /* signalling */ \
- V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
/* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
@@ -457,9 +462,6 @@ using Instr = uint32_t;
V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
/* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
V(xscvspdp, XSCVSPDP, 0xF0000524) \
- /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
- /* signalling */ \
- V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
@@ -531,9 +533,10 @@ using Instr = uint32_t;
/* Vector Splat Immediate Byte */ \
V(xxspltib, XXSPLTIB, 0xF00002D0)
-#define PPC_XX2_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_A_FORM_LIST(V) \
- PPC_XX2_OPCODE_B_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_B_FORM_LIST(V) \
PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_EVX_OPCODE_LIST(V) \
@@ -1267,6 +1270,14 @@ using Instr = uint32_t;
/* Compare Logical */ \
V(cmpl, CMPL, 0x7C000040)
+#define PPC_X_OPCODE_G_FORM_LIST(V) \
+ /* Byte-Reverse Halfword */ \
+ V(brh, BRH, 0x7C0001B6) \
+ /* Byte-Reverse Word */ \
+ V(brw, BRW, 0x7C000136) \
+ /* Byte-Reverse Doubleword */ \
+ V(brd, BRD, 0x7C000176)
+
#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
/* Store Byte Conditional Indexed */ \
V(stbcx, STBCX, 0x7C00056D) \
@@ -1737,6 +1748,7 @@ using Instr = uint32_t;
PPC_X_OPCODE_D_FORM_LIST(V) \
PPC_X_OPCODE_E_FORM_LIST(V) \
PPC_X_OPCODE_F_FORM_LIST(V) \
+ PPC_X_OPCODE_G_FORM_LIST(V) \
PPC_X_OPCODE_EH_L_FORM_LIST(V) \
PPC_X_OPCODE_UNUSED_LIST(V)
@@ -3006,7 +3018,8 @@ class Instruction {
}
opcode = extcode | BitField(10, 2);
switch (opcode) {
- PPC_XX2_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(OPCODE_CASES)
PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
index f243055490c..c7b119a3112 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -168,8 +168,6 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
-
mov(ip, Operand(target, rmode));
mtctr(ip);
bctr();
@@ -189,15 +187,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
Register scratch = ip;
- int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
LoadU64(scratch, MemOperand(kRootRegister, offset), r0);
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
@@ -206,10 +203,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
Label skip;
- RecordCommentForOffHeapTrampoline(builtin_index);
+ RecordCommentForOffHeapTrampoline(builtin);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(ip);
bind(&skip);
@@ -276,14 +273,13 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
- int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
LoadU64(ip, MemOperand(kRootRegister, offset));
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
@@ -291,14 +287,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
return;
} else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
+ CallBuiltin(builtin, cond);
return;
}
DCHECK(code->IsExecutable());
@@ -306,6 +295,18 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(static_cast<Address>(target_index), rmode, cond);
}
+void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
@@ -1252,6 +1253,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -1470,9 +1474,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- mov(r0, Operand(kDontAdaptArgumentsSentinel));
- CmpS64(expected_parameter_count, r0);
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ mov(r0, Operand(kDontAdaptArgumentsSentinel));
+ CmpS64(expected_parameter_count, r0);
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1517,8 +1523,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1542,8 +1548,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r7, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1698,16 +1704,28 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmpi(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (lower_limit != 0) {
+ mov(scratch, Operand(lower_limit));
+ sub(scratch, value, scratch);
+ cmpli(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ mov(scratch, Operand(higher_limit));
+ CmpU64(value, scratch);
+ }
+}
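+// Note (illustrative): the unsigned comparison above is the classic single-
+// compare range check -- (value - lower_limit) <= (higher_limit - lower_limit)
+// holds in unsigned arithmetic exactly when lower_limit <= value <= higher_limit.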
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, Operand(lower_limit));
- sub(scratch, type_reg, scratch);
- cmpli(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1894,15 +1912,7 @@ void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- Register scratch = r0;
- if (lower_limit != 0) {
- mov(scratch, Operand(lower_limit));
- sub(scratch, value, scratch);
- cmpli(scratch, Operand(higher_limit - lower_limit));
- } else {
- mov(scratch, Operand(higher_limit));
- CmpU64(value, scratch);
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
@@ -2076,7 +2086,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
mov(r3, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, r4);
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -2089,7 +2099,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2111,7 +2121,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -2119,7 +2129,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -2662,7 +2672,14 @@ void TurboAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ ShiftLeftU64(scratch, src, Operand(32));
+ mtfprd(dst, scratch);
+ xscvspdpn(dst, dst);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stw(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -2670,7 +2687,13 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
+ DoubleRegister scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ xscvdpspn(scratch, src);
+ mffprwz(dst, scratch);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -2759,6 +2782,80 @@ void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
extsw(dst, dst, r);
}
+void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divd(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divdu(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divw(dst, src, value, s, r);
+ extsw(dst, dst);
+}
+void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divwu(dst, src, value, s, r);
+ ZeroExtWord32(dst, dst);
+}
+
+void TurboAssembler::ModS64(Register dst, Register src, Register value) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ modsd(dst, src, value);
+ } else {
+ Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
+ Push(scratch);
+ divd(scratch, src, value);
+ mulld(scratch, scratch, value);
+ sub(dst, src, scratch);
+ Pop(scratch);
+ }
+}
+
+void TurboAssembler::ModU64(Register dst, Register src, Register value) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ modud(dst, src, value);
+ } else {
+ Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
+ Push(scratch);
+ divdu(scratch, src, value);
+ mulld(scratch, scratch, value);
+ sub(dst, src, scratch);
+ Pop(scratch);
+ }
+}
+
+void TurboAssembler::ModS32(Register dst, Register src, Register value) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ modsw(dst, src, value);
+ } else {
+ Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
+ Push(scratch);
+ divw(scratch, src, value);
+ mullw(scratch, scratch, value);
+ sub(dst, src, scratch);
+ Pop(scratch);
+ }
+ extsw(dst, dst);
+}
+void TurboAssembler::ModU32(Register dst, Register src, Register value) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ moduw(dst, src, value);
+ } else {
+ Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
+ Push(scratch);
+ divwu(scratch, src, value);
+ mullw(scratch, scratch, value);
+ sub(dst, src, scratch);
+ Pop(scratch);
+ }
+ ZeroExtWord32(dst, dst);
+}
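+// Note (illustrative) on the pre-PPC_9_PLUS fallbacks above: the remainder is
+// rebuilt as src - (src / value) * value with truncating division, e.g. for
+// ModS32 of -7 by 3: -7 - (-2 * 3) = -1, matching what modsw would produce.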
+
void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_uint16(value.immediate()) && r == SetRC) {
@@ -3004,6 +3101,11 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
frsp(dst, dst, r);
}
+void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fcpsgn(dst, rhs, lhs, r);
+}
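+// Note (illustrative): fcpsgn frt, fra, frb copies the magnitude of frb and
+// the sign of fra, so CopySignF64(dst, lhs, rhs) computes copysign(lhs, rhs),
+// e.g. lhs = 1.5, rhs = -0.0 yields -1.5.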
+
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
@@ -3056,7 +3158,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
\
if (mem.rb() == no_reg) { \
if (!is_int16(offset)) { \
@@ -3085,7 +3187,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
int misaligned = (offset & 3); \
\
if (mem.rb() == no_reg) { \
@@ -3174,7 +3276,10 @@ void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
#define GenerateMemoryLEOperation(reg, mem, op) \
{ \
if (mem.offset() == 0) { \
- op(reg, mem); \
+ if (mem.rb() != no_reg) \
+ op(reg, mem); \
+ else \
+ op(reg, MemOperand(r0, mem.ra())); \
} else if (is_int16(mem.offset())) { \
if (mem.rb() != no_reg) \
addi(scratch, mem.rb(), Operand(mem.offset())); \
@@ -3265,7 +3370,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
LoadU64(scratch, mem, scratch2);
StoreU64LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF64(dst, mem, scratch);
#endif
}
@@ -3276,7 +3381,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
LoadU32(scratch, mem, scratch2);
StoreU32LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF32(dst, mem, scratch);
#endif
}
@@ -3453,8 +3558,25 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
+void TurboAssembler::ByteReverseU16(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ sth(val, MemOperand(sp));
+ lhbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
+}
+
+void TurboAssembler::ByteReverseU32(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ stw(val, MemOperand(sp));
+ lwbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
+}
+
+void TurboAssembler::ByteReverseU64(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ std(val, MemOperand(sp));
+ ldbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
}
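The three helpers above reverse byte order by spilling the value to the stack and reloading it with the byte-reversed loads lhbrx/lwbrx/ldbrx. A portable C++ statement of what they compute, illustrative only and not part of the patch:

#include <cstdint>

// Equivalent of ByteReverseU32: swap the byte order of a 32-bit value.
uint32_t ByteReverse32(uint32_t v) {
  return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
         ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
}

// Equivalent of ByteReverseU64, built from two 32-bit swaps.
uint64_t ByteReverse64(uint64_t v) {
  return (static_cast<uint64_t>(ByteReverse32(static_cast<uint32_t>(v))) << 32) |
         ByteReverse32(static_cast<uint32_t>(v >> 32));
}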
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
@@ -3590,8 +3712,9 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
+ CHECK_LE(target, Builtins::kLastTier0);
LoadU64(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
@@ -3631,14 +3754,88 @@ void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
cntlzd(dst, src, r);
}
+#define COUNT_TRAILING_ZEROES_SLOW(max_count, scratch1, scratch2) \
+ Label loop, done; \
+ li(scratch1, Operand(max_count)); \
+ mtctr(scratch1); \
+ mr(scratch1, src); \
+ li(dst, Operand::Zero()); \
+ bind(&loop); /* while ((src & 1) == 0) */ \
+ andi(scratch2, scratch1, Operand(1)); \
+ bne(&done, cr0); \
+ srdi(scratch1, scratch1, Operand(1)); /* src >>= 1;*/ \
+ addi(dst, dst, Operand(1)); /* dst++ */ \
+ bdnz(&loop); \
+ bind(&done);
void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+ Register scratch1, Register scratch2,
RCBit r) {
- cnttzw(dst, src, r);
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ cnttzw(dst, src, r);
+ } else {
+ COUNT_TRAILING_ZEROES_SLOW(32, scratch1, scratch2);
+ }
}
void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+ Register scratch1, Register scratch2,
RCBit r) {
- cnttzd(dst, src, r);
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
+ cnttzd(dst, src, r);
+ } else {
+ COUNT_TRAILING_ZEROES_SLOW(64, scratch1, scratch2);
+ }
+}
+#undef COUNT_TRAILING_ZEROES_SLOW
+
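The slow path above counts trailing zeros with a shift-and-test loop bounded by the operand width (li/mtctr set the bound, bdnz decrements it); on POWER9+ a single cnttzw/cnttzd is emitted instead. A scalar C++ sketch of the loop's effect, illustrative only and not part of the patch:

#include <cstdint>

// Shift-and-test trailing-zero count, mirroring COUNT_TRAILING_ZEROES_SLOW:
// test the low bit, shift right, stop after at most `width` iterations.
int CountTrailingZerosSlow(uint64_t src, int width) {
  int count = 0;
  for (int i = 0; i < width; ++i) {  // mtctr(width) / bdnz
    if (src & 1) break;              // andi + bne exits on the first set bit
    src >>= 1;                       // srdi
    ++count;                         // addi
  }
  return count;                      // equals `width` when src == 0
}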
+void TurboAssembler::ClearByteU64(Register dst, int byte_idx) {
+ CHECK(0 <= byte_idx && byte_idx <= 7);
+  int shift = byte_idx * 8;
+  rldicl(dst, dst, shift, 8);
+  rldicl(dst, dst, 64 - shift, 0);
+}
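The two rldicl rotates above bring the selected byte to the top of the register, clear the top eight bits, and rotate back. Expressed as plain C++, an illustrative sketch that uses the convention noted below for ReverseBitsInSingleByteU64 (byte_idx = 7 names the least significant byte):

#include <cstdint>

// Zero one byte of a 64-bit value; byte_idx = 0 is the most significant
// byte, byte_idx = 7 the least significant one.
uint64_t ClearByte64(uint64_t v, int byte_idx) {
  const int shift = 8 * (7 - byte_idx);  // bit offset of that byte from the LSB
  return v & ~(UINT64_C(0xFF) << shift);
}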
+
+void TurboAssembler::ReverseBitsU64(Register dst, Register src,
+ Register scratch1, Register scratch2) {
+ ByteReverseU64(dst, src);
+ for (int i = 0; i < 8; i++) {
+ ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
+ }
+}
+
+void TurboAssembler::ReverseBitsU32(Register dst, Register src,
+ Register scratch1, Register scratch2) {
+ ByteReverseU32(dst, src);
+ for (int i = 4; i < 8; i++) {
+ ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
+ }
+}
+
+// byte_idx=7 refers to least significant byte
+void TurboAssembler::ReverseBitsInSingleByteU64(Register dst, Register src,
+ Register scratch1,
+ Register scratch2,
+ int byte_idx) {
+ CHECK(0 <= byte_idx && byte_idx <= 7);
+ int j = byte_idx;
+  // zero all bits of scratch2
+ li(scratch2, Operand(0));
+ for (int i = 0; i <= 7; i++) {
+ // zero all bits of scratch1
+ li(scratch1, Operand(0));
+ // move bit (j+1)*8-i-1 of src to bit j*8+i of scratch1, erase bits
+ // (j*8+i+1):end of scratch1
+    int shift = 7 - (2 * i);
+    if (shift < 0) shift += 64;
+    rldicr(scratch1, src, shift, j * 8 + i);
+    // erase bits start:(j*8-1+i) of scratch1 (inclusive)
+    rldicl(scratch1, scratch1, 0, j * 8 + i);
+ // scratch2 = scratch2|scratch1
+ orx(scratch2, scratch2, scratch1);
+ }
+ // clear jth byte of dst and insert jth byte of scratch2
+ ClearByteU64(dst, j);
+ orx(dst, dst, scratch2);
}
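ReverseBitsU64 above composes a byte reversal with a per-byte bit reversal, which together reverse all 64 bits. A portable C++ sketch of the intended result, illustrative only and not part of the patch:

#include <cstdint>

// Reverse the bits inside one byte.
uint8_t ReverseBitsInByte(uint8_t b) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i) {
    r = static_cast<uint8_t>((r << 1) | ((b >> i) & 1));
  }
  return r;
}

// Full 64-bit bit reversal: byte-reverse, then bit-reverse each byte,
// mirroring ByteReverseU64 followed by ReverseBitsInSingleByteU64.
uint64_t ReverseBits64(uint64_t v) {
  uint64_t swapped = 0;
  for (int i = 0; i < 8; ++i) {  // byte reversal
    swapped |= static_cast<uint64_t>((v >> (8 * i)) & 0xFF) << (8 * (7 - i));
  }
  uint64_t out = 0;
  for (int i = 0; i < 8; ++i) {  // reverse bits within each byte
    uint8_t byte = static_cast<uint8_t>((swapped >> (8 * i)) & 0xFF);
    out |= static_cast<uint64_t>(ReverseBitsInByte(byte)) << (8 * i);
  }
  return out;
}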
} // namespace internal
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
index 035c29b1e52..f4f7d0663c2 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -49,6 +49,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void CallBuiltin(Builtin builtin, Condition cond);
void Popcnt32(Register dst, Register src);
void Popcnt64(Register dst, Register src);
// Converts the integer (untagged smi) in |src| to a double, storing
@@ -201,6 +202,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
void MulS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
RCBit r = LeaveRC);
+ void DivS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void ModS64(Register dst, Register src, Register value);
+ void ModU64(Register dst, Register src, Register value);
+ void ModS32(Register dst, Register src, Register value);
+ void ModU32(Register dst, Register src, Register value);
void AndU64(Register dst, Register src, const Operand& value,
Register scratch = r0, RCBit r = SetRC);
@@ -248,8 +261,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CountLeadingZerosU32(Register dst, Register src, RCBit r = LeaveRC);
void CountLeadingZerosU64(Register dst, Register src, RCBit r = LeaveRC);
- void CountTrailingZerosU32(Register dst, Register src, RCBit r = LeaveRC);
- void CountTrailingZerosU64(Register dst, Register src, RCBit r = LeaveRC);
+ void CountTrailingZerosU32(Register dst, Register src, Register scratch1 = ip,
+ Register scratch2 = r0, RCBit r = LeaveRC);
+ void CountTrailingZerosU64(Register dst, Register src, Register scratch1 = ip,
+ Register scratch2 = r0, RCBit r = LeaveRC);
+
+ void ClearByteU64(Register dst, int byte_idx);
+ void ReverseBitsU64(Register dst, Register src, Register scratch1,
+ Register scratch2);
+ void ReverseBitsU32(Register dst, Register src, Register scratch1,
+ Register scratch2);
+ void ReverseBitsInSingleByteU64(Register dst, Register src,
+ Register scratch1, Register scratch2,
+ int byte_idx);
void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
RCBit r = LeaveRC);
@@ -267,6 +291,174 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RCBit r = LeaveRC);
void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
RCBit r = LeaveRC);
+ void CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+
+ template <class _type>
+ void SignedExtend(Register dst, Register value) {
+ switch (sizeof(_type)) {
+ case 1:
+ extsb(dst, value);
+ break;
+ case 2:
+ extsh(dst, value);
+ break;
+ case 4:
+ extsw(dst, value);
+ break;
+ case 8:
+ if (dst != value) mr(dst, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <class _type>
+ void ZeroExtend(Register dst, Register value) {
+ switch (sizeof(_type)) {
+ case 1:
+ ZeroExtByte(dst, value);
+ break;
+ case 2:
+ ZeroExtHalfWord(dst, value);
+ break;
+ case 4:
+ ZeroExtWord32(dst, value);
+ break;
+ case 8:
+ if (dst != value) mr(dst, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ template <class _type>
+ void ExtendValue(Register dst, Register value) {
+ if (std::is_signed<_type>::value) {
+ SignedExtend<_type>(dst, value);
+ } else {
+ ZeroExtend<_type>(dst, value);
+ }
+ }
+
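SignedExtend/ZeroExtend/ExtendValue above pick the extension at compile time from the template parameter's signedness. A small C++ analogue of that dispatch for the integer widths the assembler handles, illustrative only and not part of the patch:

#include <cstdint>
#include <type_traits>

// Compile-time choice between sign- and zero-extension, mirroring
// ExtendValue<_type>: signed types sign-extend, unsigned types zero-extend.
template <class T>
int64_t ExtendToInt64(int64_t raw) {
  if (std::is_signed<T>::value) {
    return static_cast<int64_t>(static_cast<T>(raw));  // SignedExtend
  }
  return static_cast<int64_t>(
      static_cast<std::make_unsigned_t<T>>(raw));  // ZeroExtend
}
// e.g. ExtendToInt64<int8_t>(0xFF) == -1, ExtendToInt64<uint8_t>(0xFF) == 255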
+ template <class _type>
+ void LoadReserve(Register output, MemOperand dst) {
+ switch (sizeof(_type)) {
+ case 1:
+ lbarx(output, dst);
+ break;
+ case 2:
+ lharx(output, dst);
+ break;
+ case 4:
+ lwarx(output, dst);
+ break;
+ case 8:
+ ldarx(output, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (std::is_signed<_type>::value) {
+ SignedExtend<_type>(output, output);
+ }
+ }
+
+ template <class _type>
+ void StoreConditional(Register value, MemOperand dst) {
+ switch (sizeof(_type)) {
+ case 1:
+ stbcx(value, dst);
+ break;
+ case 2:
+ sthcx(value, dst);
+ break;
+ case 4:
+ stwcx(value, dst);
+ break;
+ case 8:
+ stdcx(value, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <class _type>
+ void AtomicCompareExchange(MemOperand dst, Register old_value,
+ Register new_value, Register output,
+ Register scratch) {
+ Label loop;
+ Label exit;
+ if (sizeof(_type) != 8) {
+ ExtendValue<_type>(scratch, old_value);
+ old_value = scratch;
+ }
+ lwsync();
+ bind(&loop);
+ LoadReserve<_type>(output, dst);
+ cmp(output, old_value, cr0);
+ bne(&exit, cr0);
+ StoreConditional<_type>(new_value, dst);
+ bne(&loop, cr0);
+ bind(&exit);
+ sync();
+ }
+
+ template <class _type>
+ void AtomicExchange(MemOperand dst, Register new_value, Register output) {
+ Label exchange;
+ lwsync();
+ bind(&exchange);
+ LoadReserve<_type>(output, dst);
+ StoreConditional<_type>(new_value, dst);
+ bne(&exchange, cr0);
+ sync();
+ }
+
+ template <class _type, class bin_op>
+ void AtomicOps(MemOperand dst, Register value, Register output,
+ Register result, bin_op op) {
+ Label binop;
+ lwsync();
+ bind(&binop);
+ switch (sizeof(_type)) {
+ case 1:
+ lbarx(output, dst);
+ break;
+ case 2:
+ lharx(output, dst);
+ break;
+ case 4:
+ lwarx(output, dst);
+ break;
+ case 8:
+ ldarx(output, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ op(result, output, value);
+ switch (sizeof(_type)) {
+ case 1:
+ stbcx(result, dst);
+ break;
+ case 2:
+ sthcx(result, dst);
+ break;
+ case 4:
+ stwcx(result, dst);
+ break;
+ case 8:
+ stdcx(result, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ bne(&binop, cr0);
+ sync();
+ }
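The templates above wrap PowerPC load-reserve/store-conditional pairs (lbarx/lharx/lwarx/ldarx with the matching store-conditional forms) in a retry loop, fenced by lwsync before and sync after; the conditional store fails and the loop repeats if the reservation was lost. For intuition, the same operations expressed with std::atomic, an illustrative sketch rather than the generated code:

#include <atomic>
#include <cstdint>

// Roughly what AtomicCompareExchange emits for a 32-bit value: the returned
// value is what was observed in memory; the store happens only on a match.
uint32_t AtomicCompareExchange32(std::atomic<uint32_t>* slot,
                                 uint32_t expected, uint32_t desired) {
  uint32_t observed = expected;
  slot->compare_exchange_strong(observed, desired, std::memory_order_seq_cst);
  return observed;  // equals `expected` iff the exchange succeeded
}

// Roughly what AtomicOps emits when the bound operation is an add.
uint32_t AtomicFetchAdd32(std::atomic<uint32_t>* slot, uint32_t value) {
  return slot->fetch_add(value, std::memory_order_seq_cst);
}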
void Push(Register src) { push(src); }
// Push a handle.
@@ -418,6 +610,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Simd128Register scratch);
void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
+ void ByteReverseU16(Register dst, Register val);
+ void ByteReverseU32(Register dst, Register val);
+ void ByteReverseU64(Register dst, Register val);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
@@ -561,8 +757,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst_hi,
#endif
Register dst, DoubleRegister src);
- void MovIntToFloat(DoubleRegister dst, Register src);
- void MovFloatToInt(Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src, Register scratch);
+ void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value,
@@ -582,6 +778,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
ShiftRightS64(dst, src, Operand(kSmiShift), rc);
}
}
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
void ZeroExtByte(Register dst, Register src);
void ZeroExtHalfWord(Register dst, Register src);
@@ -735,8 +942,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1012,6 +1217,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
@@ -1098,10 +1305,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(not_smi_label, cr0);
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
diff --git a/chromium/v8/src/codegen/ppc/register-ppc.h b/chromium/v8/src/codegen/ppc/register-ppc.h
index ffeb327055f..68adfdb1557 100644
--- a/chromium/v8/src/codegen/ppc/register-ppc.h
+++ b/chromium/v8/src/codegen/ppc/register-ppc.h
@@ -349,7 +349,6 @@ constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
-constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
diff --git a/chromium/v8/src/codegen/register-arch.h b/chromium/v8/src/codegen/register-arch.h
index eb4cdb8789b..d5ea2879daa 100644
--- a/chromium/v8/src/codegen/register-arch.h
+++ b/chromium/v8/src/codegen/register-arch.h
@@ -22,6 +22,8 @@
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/register-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/codegen/register-configuration.cc b/chromium/v8/src/codegen/register-configuration.cc
index aca5295c119..2fc97e2fec6 100644
--- a/chromium/v8/src/codegen/register-configuration.cc
+++ b/chromium/v8/src/codegen/register-configuration.cc
@@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_LOONG64
+ kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
@@ -102,42 +104,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
GetDefaultRegisterConfiguration)
-// Allocatable registers with the masking register removed.
-class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultPoisoningRegisterConfiguration()
- : RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- kMaxAllocatableGeneralRegisterCount - 1,
- get_num_allocatable_double_registers(),
- InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
- }
-
- private:
- static const int* InitializeGeneralRegisterCodes() {
- int filtered_index = 0;
- for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
- if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
- allocatable_general_codes_[filtered_index] =
- kAllocatableGeneralCodes[i];
- filtered_index++;
- }
- }
- DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
- return allocatable_general_codes_;
- }
-
- static int
- allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
-};
-
-int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
- [kMaxAllocatableGeneralRegisterCount - 1];
-
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
- GetDefaultPoisoningRegisterConfiguration)
-
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
// to use less registers. Currently, it's only used by RecordWrite code stub.
@@ -184,10 +150,6 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return GetDefaultRegisterConfiguration();
}
-const RegisterConfiguration* RegisterConfiguration::Poisoning() {
- return GetDefaultPoisoningRegisterConfiguration();
-}
-
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
diff --git a/chromium/v8/src/codegen/reloc-info.cc b/chromium/v8/src/codegen/reloc-info.cc
index 0693d324597..7c4d85128f0 100644
--- a/chromium/v8/src/codegen/reloc-info.cc
+++ b/chromium/v8/src/codegen/reloc-info.cc
@@ -320,7 +320,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
return true;
#endif
}
diff --git a/chromium/v8/src/codegen/reloc-info.h b/chromium/v8/src/codegen/reloc-info.h
index 918c93b13f8..2479a926e38 100644
--- a/chromium/v8/src/codegen/reloc-info.h
+++ b/chromium/v8/src/codegen/reloc-info.h
@@ -255,8 +255,9 @@ class RelocInfo {
V8_INLINE HeapObject target_object();
// In GC operations, we don't have a host_ pointer. Retrieving a target
- // for COMPRESSED_EMBEDDED_OBJECT mode requires an isolate.
- V8_INLINE HeapObject target_object_no_host(Isolate* isolate);
+ // for COMPRESSED_EMBEDDED_OBJECT mode requires a pointer compression cage
+ // base value.
+ V8_INLINE HeapObject target_object_no_host(PtrComprCageBase cage_base);
V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
V8_INLINE void set_target_object(
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index e3ac9b83f43..be3e59c7e42 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -170,10 +170,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc b/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
index 0c322542a94..47479cd0160 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,6 +57,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ answer |= 1u << RISCV_SIMD;
+#endif // def CAN_USE_RVV_INSTRUCTIONS
return answer;
}
@@ -64,18 +67,20 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
-
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+ // Set a static value on whether SIMD is supported.
+  // This variable is only used for certain archs to query
+  // SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
+  // callers should use CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
-
int ToNumber(Register reg) {
DCHECK(reg.is_valid());
const int kNumbers[] = {
@@ -207,7 +212,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(t3.bit() | t5.bit() | s10.bit()),
+ VU(this),
+ scratch_register_list_(t3.bit() | t5.bit()),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -309,7 +315,6 @@ bool Assembler::IsCBranch(Instr instr) {
int Op = instr & kRvcOpcodeMask;
return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
}
-
bool Assembler::IsJump(Instr instr) {
int Op = instr & kBaseOpcodeMask;
return Op == JAL || Op == JALR;
@@ -377,7 +382,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm13;
}
- } break;
+ }
case JAL: {
int32_t imm21 = JumpOffset(instr);
if (imm21 == kEndOfJumpChain) {
@@ -386,7 +391,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm21;
}
- } break;
+ }
case JALR: {
int32_t imm12 = instr >> 20;
if (imm12 == kEndOfJumpChain) {
@@ -395,7 +400,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm12;
}
- } break;
+ }
case LUI: {
Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
pc = target_address_at(pc);
@@ -409,7 +414,7 @@ int Assembler::target_at(int pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } break;
+ }
case AUIPC: {
Instr instr_auipc = instr;
Instr instr_I = instr_at(pos + 4);
@@ -417,18 +422,18 @@ int Assembler::target_at(int pos, bool is_internal) {
int32_t offset = BrachlongOffset(instr_auipc, instr_I);
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_J: {
int32_t offset = instruction->RvcImm11CJValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_BNEZ:
case RO_C_BEQZ: {
int32_t offset = instruction->RvcImm8BValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return pos + offset;
- } break;
+ }
default: {
if (instr == kEndOfJumpChain) {
return kEndOfChain;
@@ -437,7 +442,7 @@ int Assembler::target_at(int pos, bool is_internal) {
((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
return (imm18 + pos);
}
- } break;
+ }
}
}
@@ -511,7 +516,6 @@ static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos,
DCHECK(Assembler::IsCJal(instr | (imm11 & kImm11Mask)));
return instr | (imm11 & kImm11Mask);
}
-
static inline Instr SetCBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
DCHECK(Assembler::IsCBranch(instr));
@@ -572,7 +576,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal,
instr_at_put(pos, instr);
instr_at_put(pos + 4, kNopByte);
} else {
- DCHECK(is_int32(offset));
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
@@ -699,7 +703,7 @@ void Assembler::next(Label* L, bool is_internal) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK_GT(link, 0);
+ DCHECK_GE(link, 0);
DEBUG_PRINTF("next: %p to %p (%d)\n", L,
reinterpret_cast<Instr*>(buffer_start_ + link), link);
L->link_to(link);
@@ -762,9 +766,9 @@ int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
Instr instr_jalr, int32_t offset) {
DCHECK(IsAuipc(instr_auipc));
DCHECK(IsJalr(instr_jalr));
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
- CHECK(is_int32(offset));
instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
DCHECK(offset ==
@@ -1137,6 +1141,123 @@ void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
emit(instr);
}
+// OPIVV OPFVV OPMVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ int8_t vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_IVX || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVF
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_FVF);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((fs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void Assembler::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ DCHECK(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
// ----- Instruction class templates match those in the compiler
void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
@@ -2328,8 +2449,590 @@ void Assembler::EBREAK() {
ebreak();
}
-// Privileged
+// RVV
+void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void Assembler::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void Assembler::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, v0, vs2, NoMask);
+}
+
+void Assembler::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ DCHECK_NE(vd, vs1);
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+}
+void Assembler::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
+ MaskType mask) {
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
+}
+
+void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
+}
+
+#define DEFINE_OPIVV(name, funct6) \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+#define DEFINE_OPFVV(name, funct6) \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVX(name, funct6) \
+ void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVI(name, funct6) \
+ void Assembler::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+#define DEFINE_OPMVV(name, funct6) \
+ void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+#define DEFINE_OPFVF(name, funct6) \
+ void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+
+DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+#undef DEFINE_OPFVV
+#undef DEFINE_OPFVF
+
+void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void Assembler::vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ DCHECK(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void Assembler::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
+
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ case E64:
+ width = 0b111;
+ break;
+ case E128:
+ width = 0b000;
+ break;
+ case E256:
+ width = 0b101;
+ break;
+ case E512:
+ width = 0b110;
+ break;
+ case E1024:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
+
+void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0);
+}
+
+void Assembler::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+
+void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0b000);
+}
+void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, IsMew, 0b000);
+}
+
+void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b001);
+}
+
+void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b010);
+}
+
+void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b011);
+}
+
+void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b100);
+}
+
+void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b101);
+}
+
+void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b110);
+}
+
+void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b111);
+}
+void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b001);
+}
+void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b010);
+}
+void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b011);
+}
+void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b100);
+}
+void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b101);
+}
+void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b110);
+}
+void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b111);
+}
+
+void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+
+void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+
+// Privileged
void Assembler::uret() {
GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010));
}
@@ -2723,8 +3426,6 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// for a load/store when the offset doesn't fit into int12.
// Must not overwrite the register 'base' while loading 'offset'.
- DCHECK(src->rm() != scratch);
-
constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
@@ -2766,7 +3467,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
return 8; // Number of instructions patched.
} else {
UNIMPLEMENTED();
- return 1;
}
}
@@ -2957,7 +3657,7 @@ void Assembler::CheckTrampolinePool() {
for (int i = 0; i < unbound_labels_count_; i++) {
int64_t imm64;
imm64 = branch_long_offset(&after_pool);
- DCHECK(is_int32(imm64));
+ CHECK(is_int32(imm64 + 0x800));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
auipc(t6, Hi20); // Read PC + Hi20 into t6
@@ -3001,7 +3701,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
int64_t imm = (int64_t)target - (int64_t)pc;
Instr instr = instr_at(pc);
Instr instr1 = instr_at(pc + 1 * kInstrSize);
- DCHECK(is_int32(imm));
+ DCHECK(is_int32(imm + 0x800));
int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc, num * kInstrSize);
@@ -3203,9 +3903,9 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
int32_t distance = static_cast<int32_t>(
reinterpret_cast<Address>(entry_offset) -
reinterpret_cast<Address>(assm_->toAddress(load_offset)));
+ CHECK(is_int32(distance + 0x800));
int32_t Hi20 = (((int32_t)distance + 0x800) >> 12);
int32_t Lo12 = (int32_t)distance << 20 >> 20;
- CHECK(is_int32(distance));
assm_->instr_at_put(load_offset, SetAuipcOffset(Hi20, instr_auipc));
assm_->instr_at_put(load_offset + 4, SetLdOffset(Lo12, instr_ld));
}
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64.h b/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
index 88e403d366b..e30254aa653 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -358,11 +358,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// invalidated. For instance, when the assembler buffer grows or a GC happens
// between Code object allocation and Code object finalization.
void FixOnHeapReferences(bool update_embedded_objects = true);
-
// This function is called when we fallback from on-heap to off-heap
// compilation and patch on-heap references to handles.
void FixOnHeapReferencesToHandles();
-
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
@@ -666,6 +664,258 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void NOP();
void EBREAK();
+ // RVV
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
+
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+#define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+#define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+#undef ARG
+#undef SegInstr
+
+ // RVV Vector Arithmetic Instruction
+
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+#define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVV(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPFVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPFVF(name, funct6) \
+ void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+ DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+ DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+ DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+
+ DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+ DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+
+ DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+ DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+ DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+ DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
+ DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+
+ DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+ DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+#undef DEFINE_OPMVV
+#undef DEFINE_OPMVX
+#undef DEFINE_OPFVV
+#undef DEFINE_OPFVF
+
+#define DEFINE_VFUNARY(name, funct6, vs1) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+ DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
+ DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
+ DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
+ DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+
+ DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+#undef DEFINE_VFUNARY
+
+ void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
+
+ void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
+
+ void vfneg_vv(VRegister dst, VRegister src) { vfsngjn_vv(dst, src, src); }
+ void vfabs_vv(VRegister dst, VRegister src) { vfsngjx_vv(dst, src, src); }
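A minimal usage sketch of the arithmetic declarations above (not part of the diff; it assumes an Assembler* named assm and the standard riscv64 register objects t0, a0, v1-v3):

    assm->vsetvli(t0, a0, E32, m1);  // vl = min(a0, VLMAX) for 32-bit elements
    assm->vadd_vv(v1, v2, v3);       // per-lane v1 = v2 + v3 (vd, vs2, vs1)
    assm->vadd_vi(v1, v2, 4, Mask);  // per-lane v1 = v2 + 4, only where v0 = 1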
// Privileged
void uret();
void sret();
@@ -942,6 +1192,71 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
constpool_.RecordEntry(data, rmode);
}
+ class VectorUnit {
+ public:
+    inline int32_t sew() const { return 1 << (sew_ + 3); }  // 8 << sew_ bits
+
+ inline int32_t vlmax() const {
+ if ((lmul_ & 0b100) != 0) {
+ return (kRvvVLEN / sew()) >> (lmul_ & 0b11);
+ } else {
+ return ((kRvvVLEN << lmul_) / sew());
+ }
+ }
+
+ explicit VectorUnit(Assembler* assm) : assm_(assm) {}
+
+ void set(Register rd, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_ || vl != vlmax()) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = vlmax();
+ assm_->vsetvlmax(rd, sew_, lmul_);
+ }
+ }
+
+ void set(RoundingMode mode) {
+ if (mode_ != mode) {
+ assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
+ assm_->fscsr(kScratchReg);
+ mode_ = mode;
+ }
+ }
+ void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = 0;
+ assm_->vsetvli(rd, rs1, sew_, lmul_);
+ }
+ }
+
+ void set(VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ assm_->vsetvl(sew_, lmul_);
+ }
+ }
+
+ private:
+ VSew sew_ = E8;
+ Vlmul lmul_ = m1;
+ int32_t vl = 0;
+ Assembler* assm_;
+ RoundingMode mode_ = RNE;
+ };
+
+ VectorUnit VU;
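A sketch of the caching behaviour of VectorUnit::set (illustrative only; kScratchReg is assumed to be available as the rd operand): only calls that change SEW/LMUL, or that need vl re-derived, emit a vsetvli.

    VU.set(kScratchReg, E32, m1);  // emits vsetvli: SEW/LMUL differ from E8/m1
    VU.set(kScratchReg, E32, m1);  // no instruction: sew_, lmul_ and vl match
    VU.set(kScratchReg, E64, m1);  // emits again: SEW changed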
+
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
+ next_buffer_check_ - extra_instructions * kInstrSize);
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -1020,14 +1335,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
- void CheckTrampolinePoolQuick(int extra_instructions = 0) {
- DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
- next_buffer_check_ - extra_instructions * kInstrSize);
- if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
- CheckTrampolinePool();
- }
- }
-
#ifdef DEBUG
bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
return target_address_at(
@@ -1192,6 +1499,46 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
FPURegister rs1, FPURegister rs2);
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPMVX
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPFVF
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, FPURegister fs1,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
// Labels.
void print(const Label* L);
void bind_to(Label* L, int pos);
diff --git a/chromium/v8/src/codegen/riscv64/constants-riscv64.cc b/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
index d2709dc2c7c..655a97c12f5 100644
--- a/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -105,6 +105,45 @@ int FPURegisters::Number(const char* name) {
return kInvalidFPURegister;
}
+const char* VRegisters::names_[kNumVRegisters] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+const VRegisters::RegisterAlias VRegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* VRegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumVRegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int VRegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumVRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+  // No register with the requested name found.
+ return kInvalidVRegister;
+}
+
InstructionBase::Type InstructionBase::InstructionType() const {
if (IsIllegalInstruction()) {
return kUnsupported;
@@ -193,6 +232,8 @@ InstructionBase::Type InstructionBase::InstructionType() const {
return kJType;
case SYSTEM:
return kIType;
+ case OP_V:
+ return kVType;
}
}
return kUnsupported;
diff --git a/chromium/v8/src/codegen/riscv64/constants-riscv64.h b/chromium/v8/src/codegen/riscv64/constants-riscv64.h
index c9cb7687fdf..424e966e150 100644
--- a/chromium/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/constants-riscv64.h
@@ -12,14 +12,15 @@
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
-#define UNIMPLEMENTED_RISCV() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
- __FILE__, __LINE__, __func__)
+#define UNIMPLEMENTED_RISCV() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__);
#else
#define UNIMPLEMENTED_RISCV()
#endif
-#define UNSUPPORTED_RISCV() v8::internal::PrintF("Unsupported instruction.\n")
+#define UNSUPPORTED_RISCV() \
+ v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__)
enum Endianness { kLittle, kBig };
@@ -56,7 +57,7 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
namespace v8 {
namespace internal {
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4094;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -75,6 +76,9 @@ const int kPCRegister = 34;
const int kNumFPURegisters = 32;
const int kInvalidFPURegister = -1;
+// Number of vector registers
+const int kNumVRegisters = 32;
+const int kInvalidVRegister = -1;
// 'pref' instruction hints
const int32_t kPrefHintLoad = 0;
const int32_t kPrefHintStore = 1;
@@ -131,6 +135,24 @@ class FPURegisters {
static const RegisterAlias aliases_[];
};
+class VRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumVRegisters];
+ static const RegisterAlias aliases_[];
+};
+
// -----------------------------------------------------------------------------
// Instructions encoding constants.
@@ -170,6 +192,12 @@ const int kFunct2Shift = 25;
const int kFunct2Bits = 2;
const int kRs1Shift = 15;
const int kRs1Bits = 5;
+const int kVs1Shift = 15;
+const int kVs1Bits = 5;
+const int kVs2Shift = 20;
+const int kVs2Bits = 5;
+const int kVdShift = 7;
+const int kVdBits = 5;
const int kRs2Shift = 20;
const int kRs2Bits = 5;
const int kRs3Shift = 27;
@@ -215,6 +243,71 @@ const int kRvcFunct2Bits = 2;
const int kRvcFunct6Shift = 10;
const int kRvcFunct6Bits = 6;
+// for RVV extension
+constexpr int kRvvELEN = 64;
+constexpr int kRvvVLEN = 128;
+constexpr int kRvvSLEN = kRvvVLEN;
+const int kRvvFunct6Shift = 26;
+const int kRvvFunct6Bits = 6;
+const uint32_t kRvvFunct6Mask =
+ (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift);
+
+const int kRvvVmBits = 1;
+const int kRvvVmShift = 25;
+const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift);
+
+const int kRvvVs2Bits = 5;
+const int kRvvVs2Shift = 20;
+const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift);
+
+const int kRvvVs1Bits = 5;
+const int kRvvVs1Shift = 15;
+const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift);
+
+const int kRvvRs1Bits = kRvvVs1Bits;
+const int kRvvRs1Shift = kRvvVs1Shift;
+const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift);
+
+const int kRvvRs2Bits = 5;
+const int kRvvRs2Shift = 20;
+const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift);
+
+const int kRvvImm5Bits = kRvvVs1Bits;
+const int kRvvImm5Shift = kRvvVs1Shift;
+const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift);
+
+const int kRvvVdBits = 5;
+const int kRvvVdShift = 7;
+const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift);
+
+const int kRvvRdBits = kRvvVdBits;
+const int kRvvRdShift = kRvvVdShift;
+const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift);
+
+const int kRvvZimmBits = 11;
+const int kRvvZimmShift = 20;
+const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift);
+
+const int kRvvUimmShift = kRvvRs1Shift;
+const int kRvvUimmBits = kRvvRs1Bits;
+const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift);
+
+const int kRvvWidthBits = 3;
+const int kRvvWidthShift = 12;
+const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift);
+
+const int kRvvMopBits = 2;
+const int kRvvMopShift = 26;
+const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift);
+
+const int kRvvMewBits = 1;
+const int kRvvMewShift = 28;
+const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift);
+
+const int kRvvNfBits = 3;
+const int kRvvNfShift = 29;
+const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift);
+
// RISCV Instruction bit masks
const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
<< kBaseOpcodeShift;
@@ -231,6 +324,7 @@ const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kUTypeMask = kBaseOpcodeMask;
const uint32_t kJTypeMask = kBaseOpcodeMask;
+const uint32_t kVTypeMask = kRvvFunct6Mask | kFunct3Mask | kBaseOpcodeMask;
const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
@@ -535,6 +629,306 @@ enum Opcode : uint32_t {
RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+
+ // RVV Extension
+ OP_V = 0b1010111,
+ OP_IVV = OP_V | (0b000 << kFunct3Shift),
+ OP_FVV = OP_V | (0b001 << kFunct3Shift),
+ OP_MVV = OP_V | (0b010 << kFunct3Shift),
+ OP_IVI = OP_V | (0b011 << kFunct3Shift),
+ OP_IVX = OP_V | (0b100 << kFunct3Shift),
+ OP_FVF = OP_V | (0b101 << kFunct3Shift),
+ OP_MVX = OP_V | (0b110 << kFunct3Shift),
+
+ RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
+ RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
+ RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
+
+ // RVV LOAD/STORE
+ RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+
+ RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
+  // Note: in these encodings the funct6 bit positions hold the mop field.
+ RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ // RVV Vector Arithmetic Instruction
+ VADD_FUNCT6 = 0b000000,
+ RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSUB_FUNCT6 = 0b000010,
+ RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSADDU_FUNCT6 = 0b100000,
+ RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VSADD_FUNCT6 = 0b100001,
+ RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUB_FUNCT6 = 0b100011,
+ RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUBU_FUNCT6 = 0b100010,
+ RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+
+ VRSUB_FUNCT6 = 0b000011,
+ RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VMINU_FUNCT6 = 0b000100,
+ RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
+
+ VMIN_FUNCT6 = 0b000101,
+ RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VMAXU_FUNCT6 = 0b000110,
+ RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+
+ VMAX_FUNCT6 = 0b000111,
+ RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VAND_FUNCT6 = 0b001001,
+ RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
+
+ VOR_FUNCT6 = 0b001010,
+ RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
+
+ VXOR_FUNCT6 = 0b001011,
+ RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
+
+ VRGATHER_FUNCT6 = 0b001100,
+ RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+
+ VMV_FUNCT6 = 0b010111,
+ RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
+
+ RO_V_VMERGE_VI = RO_V_VMV_VI,
+ RO_V_VMERGE_VV = RO_V_VMV_VV,
+ RO_V_VMERGE_VX = RO_V_VMV_VX,
+
+ VMSEQ_FUNCT6 = 0b011000,
+ RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMSNE_FUNCT6 = 0b011001,
+ RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLTU_FUNCT6 = 0b011010,
+ RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLT_FUNCT6 = 0b011011,
+ RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLE_FUNCT6 = 0b011101,
+ RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLEU_FUNCT6 = 0b011100,
+ RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGTU_FUNCT6 = 0b011110,
+ RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGT_FUNCT6 = 0b011111,
+ RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEUP_FUNCT6 = 0b001110,
+ RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEDOWN_FUNCT6 = 0b001111,
+ RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+
+ VSRL_FUNCT6 = 0b101000,
+ RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+
+ VSLL_FUNCT6 = 0b100101,
+ RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+
+ VADC_FUNCT6 = 0b010000,
+ RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
+
+ VMADC_FUNCT6 = 0b010001,
+ RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
+
+ VWXUNARY0_FUNCT6 = 0b010000,
+ VRXUNARY0_FUNCT6 = 0b010000,
+
+ RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMAXU_FUNCT6 = 0b000110,
+ RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
+ VREDMAX_FUNCT6 = 0b000111,
+ RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMINU_FUNCT6 = 0b000100,
+ RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
+ VREDMIN_FUNCT6 = 0b000101,
+ RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFUNARY0_FUNCT6 = 0b010010,
+ RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VFUNARY1_FUNCT6 = 0b010011,
+ RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),
+
+ VFCVT_XU_F_V = 0b00000,
+ VFCVT_X_F_V = 0b00001,
+ VFCVT_F_XU_V = 0b00010,
+ VFCVT_F_X_V = 0b00011,
+ VFNCVT_F_F_W = 0b10100,
+
+ VFCLASS_V = 0b10000,
+
+ VFADD_FUNCT6 = 0b000000,
+ RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFSUB_FUNCT6 = 0b000010,
+ RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFDIV_FUNCT6 = 0b100000,
+ RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VFMUL_FUNCT6 = 0b100100,
+ RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VMFEQ_FUNCT6 = 0b011000,
+ RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMFNE_FUNCT6 = 0b011100,
+ RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLT_FUNCT6 = 0b011011,
+ RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLE_FUNCT6 = 0b011001,
+ RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGE_FUNCT6 = 0b011111,
+ RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGT_FUNCT6 = 0b011101,
+ RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),
+
+ VFMAX_FUNCT6 = 0b000110,
+ RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMIN_FUNCT6 = 0b000100,
+ RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJ_FUNCT6 = 0b001000,
+ RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJN_FUNCT6 = 0b001001,
+ RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJX_FUNCT6 = 0b001010,
+ RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
};
// ----- Emulated conditions.
@@ -668,6 +1062,13 @@ enum MemoryOdering {
PSIORW = PSI | PSO | PSR | PSW
};
+const int kFloat32ExponentBias = 127;
+const int kFloat32MantissaBits = 23;
+const int kFloat32ExponentBits = 8;
+const int kFloat64ExponentBias = 1023;
+const int kFloat64MantissaBits = 52;
+const int kFloat64ExponentBits = 11;
+
enum FClassFlag {
kNegativeInfinity = 1,
kNegativeNormalNumber = 1 << 1,
@@ -681,6 +1082,52 @@ enum FClassFlag {
kQuietNaN = 1 << 9
};
+#define RVV_SEW(V) \
+ V(E8) \
+ V(E16) \
+ V(E32) \
+ V(E64) \
+ V(E128) \
+ V(E256) \
+ V(E512) \
+ V(E1024)
+
+enum VSew {
+#define DEFINE_FLAG(name) name,
+ RVV_SEW(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+#define RVV_LMUL(V) \
+ V(m1) \
+ V(m2) \
+ V(m4) \
+ V(m8) \
+ V(RESERVERD) \
+ V(mf8) \
+ V(mf4) \
+ V(mf2)
+
+enum Vlmul {
+#define DEFINE_FLAG(name) name,
+ RVV_LMUL(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+enum TailAgnosticType {
+ ta = 0x1, // Tail agnostic
+ tu = 0x0, // Tail undisturbed
+};
+
+enum MaskAgnosticType {
+ ma = 0x1, // Mask agnostic
+ mu = 0x0, // Mask undisturbed
+};
+enum MaskType {
+ Mask = 0x0, // use the mask
+ NoMask = 0x1,
+};
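A small illustrative helper (not in the diff) relating the VSew/Vlmul encodings above to element width and VLMAX for the non-fractional LMUL settings, given the kRvvVLEN = 128 defined earlier:

    constexpr int SewInBits(VSew sew) { return 8 << static_cast<int>(sew); }
    constexpr int VlmaxFor(VSew sew, Vlmul lmul) {
      // Valid for m1..m8 only; mf2/mf4/mf8 divide VLMAX instead.
      return (kRvvVLEN / SewInBits(sew)) << static_cast<int>(lmul);
    }
    // Example: VlmaxFor(E32, m1) == 4 lanes, VlmaxFor(E16, m2) == 16 lanes.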
+
// -----------------------------------------------------------------------------
// Hints.
@@ -734,6 +1181,19 @@ class InstructionBase {
kCAType,
kCBType,
kCJType,
+ // V extension
+ kVType,
+ kVLType,
+ kVSType,
+ kVAMOType,
+ kVIVVType,
+ kVFVVType,
+ kVMVVType,
+ kVIVIType,
+ kVIVXType,
+ kVFVFType,
+ kVMVXType,
+ kVSETType,
kUnsupported = -1
};
@@ -840,7 +1300,9 @@ class InstructionGetters : public T {
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
}
@@ -848,7 +1310,9 @@ class InstructionGetters : public T {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
}
@@ -857,12 +1321,35 @@ class InstructionGetters : public T {
return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
}
+ inline int Vs1Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+ }
+
+ inline int Vs2Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+ }
+
+ inline int VdValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+ }
+
inline int RdValue() const {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
this->InstructionType() == InstructionBase::kUType ||
- this->InstructionType() == InstructionBase::kJType);
+ this->InstructionType() == InstructionBase::kJType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
@@ -1149,6 +1636,129 @@ class InstructionGetters : public T {
return imm9 << 23 >> 23;
}
+ inline int vl_vs_width() {
+ int width = 0;
+ if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+ }
+
+ inline uint32_t Rvvzimm() const {
+ if ((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return zimm >> kRvvZimmShift;
+ } else {
+ DCHECK_EQ(this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return (zimm >> kRvvZimmShift) & 0x3FF;
+ }
+ }
+
+ inline uint32_t Rvvuimm() const {
+ DCHECK_EQ(
+ this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t uimm = Bits & kRvvUimmMask;
+ return uimm >> kRvvUimmShift;
+ }
+
+ inline uint32_t RvvVsew() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vsew = (zimm >> 3) & 0x7;
+ return vsew;
+ }
+
+ inline uint32_t RvvVlmul() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vlmul = zimm & 0x7;
+ return vlmul;
+ }
+
+ inline uint8_t RvvVM() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift);
+ }
+
+ inline const char* RvvSEW() const {
+ uint32_t vsew = this->RvvVsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* RvvLMUL() const {
+ uint32_t vlmul = this->RvvVlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len))
+#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len))
+
+ inline int32_t RvvSimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift),
+ kRvvImm5Bits);
+ }
+
+ inline uint32_t RvvUimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift);
+ return zext(imm, kRvvImm5Bits);
+ }
+#undef sext
+#undef zext
inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
diff --git a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 3baa71d1a2e..342660bcc01 100644
--- a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -1057,7 +1057,10 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
// ------------Pseudo-instructions-------------
// Change endianness
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
+void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+ Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
DCHECK(operand_size == 4 || operand_size == 8);
if (operand_size == 4) {
// Uint32_t x1 = 0x00FF00FF;
@@ -1068,7 +1071,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x00FF00FF);
slliw(x0, rs, 16);
srliw(rd, rs, 16);
@@ -1090,7 +1093,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x0000FFFF0000FFFFl);
slli(x0, rs, 32);
srli(rd, rs, 32);
@@ -1193,20 +1196,19 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
}
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
- const MemOperand& rs) {
+void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch_base) {
DCHECK(NBYTES == 4 || NBYTES == 8);
-
+ DCHECK_NE(scratch_base, rs.rm());
BlockTrampolinePoolScope block_trampoline_pool(this);
MemOperand source = rs;
- UseScratchRegisterScope temps(this);
- Register scratch_base = temps.Acquire();
if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
// Adjust offset for two accesses and check if offset + 3 fits into int12.
DCHECK(scratch_base != rs.rm());
AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
NBYTES - 1);
}
+ UseScratchRegisterScope temps(this);
Register scratch_other = temps.Acquire();
Register scratch = temps.Acquire();
DCHECK(scratch != rs.rm() && scratch_other != scratch &&
@@ -1258,10 +1260,10 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
template <int NBYTES>
void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
- const MemOperand& rs) {
+ const MemOperand& rs,
+ Register scratch) {
DCHECK(NBYTES == 8 || NBYTES == 4);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ DCHECK_NE(scratch, rs.rm());
if (NBYTES == 4) {
fmv_x_w(scratch, frd);
} else {
@@ -1354,20 +1356,28 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
}
-void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<4>(fd, rs);
+void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<4>(fd, rs);
+void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<8>(fd, rs);
+void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<8>(fd, rs, scratch);
}
-void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<8>(fd, rs);
+void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<8>(fd, rs, scratch);
}
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
@@ -1590,7 +1600,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value,
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(value);
DCHECK(is_uint32(index));
- li(dst, Operand(static_cast<int>(index), rmode));
+ li(dst, Operand(index, rmode));
} else {
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
li(dst, Operand(value.address(), rmode));
@@ -1623,7 +1633,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
}
int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
- if (is_int32(value)) {
+ if (is_int32(value + 0x800)) {
return InstrCountForLiLower32Bit(value);
} else {
return li_estimate(value);
@@ -1664,8 +1674,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -2036,19 +2045,12 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
// Need at least two FPRs, so check against dst == src == fpu_scratch
DCHECK(!(dst == src && dst == fpu_scratch));
- const int kFloat32ExponentBias = 127;
- const int kFloat32MantissaBits = 23;
- const int kFloat32ExponentBits = 8;
- const int kFloat64ExponentBias = 1023;
- const int kFloat64MantissaBits = 52;
- const int kFloat64ExponentBits = 11;
const int kFloatMantissaBits =
sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
const int kFloatExponentBits =
sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
const int kFloatExponentBias =
sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
-
Label done;
{
@@ -2147,6 +2149,72 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
bind(&done);
}
+// According to the JS ECMA specification, for floating-point round operations,
+// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
+// rounded result; this differs from the behavior of the RISC-V fcvt
+// instructions (which round out-of-range values to the nearest max or min
+// value), so special handling is needed for NaN, +/-Infinity, and +/-0.
+template <typename F>
+void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, RoundingMode frm) {
+ VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);
+  // If src is NaN/+-Infinity/+-Zero, or if its exponent is larger than the
+  // number of mantissa bits, the result is the same as src, so src is kept in
+  // dst for those lanes (no extra branch is generated).
+
+  // If the real exponent (biased exponent - kFloatExponentBias) is greater
+  // than or equal to the number of mantissa bits, the value has no fractional
+  // part and is already rounded; such lanes are excluded from the conversion
+  // below via the v0 mask. NaN and Infinity use the maximal exponent, so they
+  // satisfy the same condition, and JS round semantics specify that rounding
+  // NaN (Infinity) returns NaN (Infinity), so they are treated as already
+  // rounded too.
+ li(scratch, 64 - kFloat32MantissaBits - kFloat32ExponentBits);
+ vsll_vx(v_scratch, src, scratch);
+ li(scratch, 64 - kFloat32ExponentBits);
+ vsrl_vx(v_scratch, v_scratch, scratch);
+ li(scratch, kFloat32ExponentBias + kFloat32MantissaBits);
+ vmslt_vx(v0, v_scratch, scratch);
+
+ VU.set(frm);
+ vmv_vv(dst, src);
+ if (dst == src) {
+ vmv_vv(v_scratch, src);
+ }
+ vfcvt_x_f_v(dst, src, MaskType::Mask);
+ vfcvt_f_x_v(dst, dst, MaskType::Mask);
+
+  // Special handling is needed if the input is a very small positive/negative
+  // number that rounds to zero. JS semantics require that the rounded result
+  // retain the sign of the input, so a very small positive (negative)
+ // floating-point number should be rounded to positive (negative) 0.
+ if (dst == src) {
+ vfsngj_vv(dst, dst, v_scratch);
+ } else {
+ vfsngj_vv(dst, dst, src);
+ }
+}
+
+void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP);
+}
+
+void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP);
+}
+
+void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN);
+}
+
+void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
+}
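Usage sketch for the vector rounding helpers above (not part of the diff; kSimd128ScratchReg is assumed here to name a spare vector register usable as the v_scratch operand, any free VRegister would do):

    Ceil_f(v2, v4, kScratchReg, kSimd128ScratchReg);   // lane-wise ceil, f32
    Floor_d(v2, v4, kScratchReg, kSimd128ScratchReg);  // lane-wise floor, f64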
+
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
@@ -2442,7 +2510,6 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
break;
case cc_always:
UNREACHABLE();
- break;
default:
UNREACHABLE();
}
@@ -2620,7 +2687,9 @@ void TurboAssembler::Ctz64(Register rd, Register rs) {
}
}
-void TurboAssembler::Popcnt32(Register rd, Register rs) {
+void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -2644,7 +2713,6 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
uint32_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -2669,7 +2737,9 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
Srl32(rd, rd, shift);
}
-void TurboAssembler::Popcnt64(Register rd, Register rs) {
+void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
// uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
// uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
@@ -2679,7 +2749,6 @@ void TurboAssembler::Popcnt64(Register rd, Register rs) {
uint64_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -3006,7 +3075,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
DCHECK_EQ(offset, 0);
return BranchShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3122,7 +3190,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
DCHECK_EQ(offset, 0);
return BranchAndLinkShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -3347,7 +3414,7 @@ void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ IsolateData::BuiltinEntrySlotOffset(builtin));
}
void TurboAssembler::PatchAndJump(Address target) {
@@ -3483,6 +3550,7 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
RelocInfo::Mode rmode) {
int32_t offset;
if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
auipc(dst, Hi20);
@@ -3549,9 +3617,9 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
UseScratchRegisterScope temps(this);
Register handler_address = temps.Acquire();
- Register handler = temps.Acquire();
li(handler_address,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Register handler = temps.Acquire();
Ld(handler, MemOperand(handler_address));
push(handler);
@@ -3660,9 +3728,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- Branch(&regular_invoke, eq, expected_parameter_count,
- Operand(kDontAdaptArgumentsSentinel));
-
+ if (kDontAdaptArgumentsSentinel != 0) {
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
Sub64(expected_parameter_count, expected_parameter_count,
@@ -3709,8 +3778,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -3735,8 +3804,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Register receiver = temps.Acquire();
LoadReceiver(receiver, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -3813,18 +3882,19 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
Register expected_parameter_count = a2;
- UseScratchRegisterScope temps(this);
- Register temp_reg = temps.Acquire();
- LoadTaggedPointerField(
- temp_reg,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
- // The argument count is stored as uint16_t
- Lhu(expected_parameter_count,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
-
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp_reg = temps.Acquire();
+ LoadTaggedPointerField(
+ temp_reg,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(
+ cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ }
InvokeFunctionCode(function, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@@ -3861,7 +3931,74 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Sub64(range, type_reg, Operand(lower_limit));
}
-
+//------------------------------------------------------------------------------
+// Wasm
+void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmseq_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsne_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsle_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsleu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmslt_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsltu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(dst, kScratchReg, dst);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(dst, kScratchReg, dst);
+}
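For reference (illustrative only, using std::memcpy from <cstring>), the scalar view of what WasmRvvS128const assembles; lane 0 receives the low 8 bytes of imms and lane 1 the high 8 bytes:

    uint64_t lane0, lane1;
    std::memcpy(&lane0, imms, sizeof(lane0));      // imm1 in the code above
    std::memcpy(&lane1, imms + 8, sizeof(lane1));  // imm2
    // After the two vmerge steps: dst = {lane0, lane1} with SEW=E64, LMUL=m1.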
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4076,9 +4213,9 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
- li(a0, Operand(static_cast<int>(reason)));
+ li(a0, Operand(static_cast<int64_t>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
return;
}
@@ -4089,7 +4226,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -4341,6 +4478,14 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
+void TurboAssembler::SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+}
+
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
@@ -4357,7 +4502,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4366,7 +4511,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4743,16 +4888,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t6,
- MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
diff --git a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 04285916bca..1dc4d2075c7 100644
--- a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Branch(Label* target);
void Branch(int32_t target);
+ void BranchLong(Label* L);
void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
Label::Distance near_jump = Label::kFar);
void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -210,7 +211,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) final;
inline void GenPCRelativeJump(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
+ DCHECK(is_int32(imm32 + 0x800));
int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
@@ -218,7 +219,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
+ DCHECK(is_int32(imm32 + 0x800));
int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
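The tightened DCHECKs above reflect how the auipc/jalr pair splits a 32-bit PC-relative offset: the low 12 bits are sign-extended by the second instruction, so the upper part must be rounded by +0x800, and it is that rounded value which still has to fit in 32 bits. A standalone sketch of the decomposition (illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

// Mirrors the Hi20/Lo12 split in GenPCRelativeJump: Hi20 goes into auipc,
// Lo12 is the signed immediate added back by the jump/link instruction.
void CheckPcRelSplit(int64_t offset) {
  assert(offset + 0x800 == static_cast<int32_t>(offset + 0x800));  // the DCHECK
  int32_t Hi20 = static_cast<int32_t>((offset + 0x800) >> 12);
  int32_t Lo12 = static_cast<int32_t>(offset & 0xFFF);
  if (Lo12 >= 0x800) Lo12 -= 0x1000;  // sign-extend the low 12 bits
  assert((static_cast<int64_t>(Hi20) << 12) + Lo12 == offset);
}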
@@ -491,6 +492,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ void SmiToInt32(Register smi);
+
+ // Enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
@@ -570,8 +576,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Clz64(Register rd, Register rs);
void Ctz32(Register rd, Register rs);
void Ctz64(Register rd, Register rs);
- void Popcnt32(Register rd, Register rs);
- void Popcnt64(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs, Register scratch);
+ void Popcnt64(Register rd, Register rs, Register scratch);
 // The bit field starting at bit pos and extending for size bits is extracted
 // from rs and stored zero/sign-extended and right-justified in rt.
@@ -590,7 +596,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Neg_d(FPURegister fd, FPURegister fs);
// Change endianness
- void ByteSwap(Register dest, Register src, int operand_size);
+ void ByteSwap(Register dest, Register src, int operand_size,
+ Register scratch);
void Clear_if_nan_d(Register rd, FPURegister fs);
void Clear_if_nan_s(Register rd, FPURegister fs);
@@ -605,9 +612,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch_other = no_reg);
template <int NBYTES>
- void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <int NBYTES>
- void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <typename Reg_T, typename Func>
void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
@@ -631,11 +640,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Uld(Register rd, const MemOperand& rs);
void Usd(Register rd, const MemOperand& rs);
- void ULoadFloat(FPURegister fd, const MemOperand& rs);
- void UStoreFloat(FPURegister fd, const MemOperand& rs);
+ void ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch);
- void ULoadDouble(FPURegister fd, const MemOperand& rs);
- void UStoreDouble(FPURegister fd, const MemOperand& rs);
+ void ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch);
void Lb(Register rd, const MemOperand& rs);
void Lbu(Register rd, const MemOperand& rs);
@@ -833,6 +842,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+
+ void Ceil_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+
+ void Floor_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Floor_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
  // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -857,8 +876,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -908,6 +925,31 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Sub64(rd, rs1, rs2);
}
}
+  // Wasm SIMD helpers implemented with RVV.
+ void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
+ Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ VRegister Vsrc = idx != 0 ? kSimd128ScratchReg : src;
+ if (idx != 0) {
+ vslidedown_vi(kSimd128ScratchReg, src, idx);
+ }
+ vmv_xs(dst, Vsrc);
+ }
+
+ void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+
+ void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
@@ -945,13 +987,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
- void BranchLong(Label* L);
void BranchAndLinkLong(Label* L);
template <typename F_TYPE>
void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
RoundingMode mode);
+ template <typename F>
+ void RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, RoundingMode frm);
+
template <typename TruncFunc>
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
TruncFunc trunc);
@@ -1210,9 +1255,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/chromium/v8/src/codegen/riscv64/register-riscv64.h b/chromium/v8/src/codegen/riscv64/register-riscv64.h
index 69654a4f54d..14c993512f5 100644
--- a/chromium/v8/src/codegen/riscv64/register-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/register-riscv64.h
@@ -23,14 +23,14 @@ namespace internal {
// s3: scratch register s4: scratch register 2 used in code-generator-riscv64
// s6: roots in Javascript code s7: context register
// s11: PtrComprCageBaseRegister
-// t3 t5 s10 : scratch register used in scratch_register_list
-
+// t3 t5 : scratch registers used in scratch_register_list
+// t6 : call register.
 // t0 t1 t2 t4: caller-saved scratch registers that can be used in
 // macroassembler and builtin-riscv64
#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) \
V(a4) V(a5) V(a6) V(a7) V(t0) \
- V(t1) V(t2) V(t4) V(s7) V(s8) V(s9)
+ V(t1) V(t2) V(t4) V(s7) V(s8) V(s9) V(s10)
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
@@ -49,16 +49,16 @@ namespace internal {
V(fs8) V(fs9) V(fs10) V(fs11) V(ft8) V(ft9) V(ft10) V(ft11)
#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+#define VECTOR_REGISTERS(V) \
+ V(v0) V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
+ V(v8) V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(ft0) V(ft1) V(ft2) V(ft3) \
- V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
- V(fa6) V(fa7)
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
+ V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
+ V(fa6) V(fa7)
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
@@ -256,6 +256,19 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum VRegisterCode {
+#define REGISTER_CODE(R) kVRCode_##R,
+ VECTOR_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kVRAfterLast
+};
+class VRegister : public RegisterBase<VRegister, kVRAfterLast> {
+ friend class RegisterBase;
+
+ public:
+ explicit constexpr VRegister(int code) : RegisterBase(code) {}
+};
+
// Coprocessor register.
class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
public:
@@ -274,25 +287,24 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
return FPURegister::from_code(code() + 1);
}
+  // FIXME(riscv64): In RVV, vector registers are separate from float
+  // registers. To keep this CL small, we assume for now that the vector and
+  // floating-point register files are shared.
+ VRegister toV() const {
+ DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
+    // FIXME(riscv): v0 is the special mask register and cannot be allocated,
+    // so map it to v8, which is otherwise unallocated.
+ if (code() == 0) {
+ return VRegister(8);
+ }
+ return VRegister(code());
+ }
+
private:
friend class RegisterBase;
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-// TODO(RISCV): Remove MIPS MSA registers.
-// https://github.com/v8-riscv/v8/issues/429
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
@@ -304,6 +316,8 @@ using FloatRegister = FPURegister;
using DoubleRegister = FPURegister;
+using Simd128Register = VRegister;
+
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
@@ -311,15 +325,12 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-// SIMD registers.
-using Simd128Register = MSARegister;
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
+#define DECLARE_VECTOR_REGISTER(R) \
+ constexpr VRegister R = VRegister::from_code(kVRCode_##R);
+VECTOR_REGISTERS(DECLARE_VECTOR_REGISTER)
+#undef DECLARE_VECTOR_REGISTER
-const Simd128Register no_msareg = Simd128Register::no_reg();
+const VRegister no_msareg = VRegister::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -328,14 +339,14 @@ constexpr Register cp = s7;
constexpr Register kScratchReg = s3;
constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = fs11;
+constexpr DoubleRegister kScratchDoubleReg = ft0;
constexpr DoubleRegister kDoubleRegZero = fs9;
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
-DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = a0;
@@ -344,7 +355,6 @@ constexpr Register kReturnRegister2 = a2;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a1;
-constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = a0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
@@ -364,6 +374,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
+constexpr VRegister kSimd128ScratchReg = v27;
+constexpr VRegister kSimd128ScratchReg2 = v26;
+constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
constexpr Register kPtrComprCageBaseRegister = s11; // callee save
diff --git a/chromium/v8/src/codegen/s390/assembler-s390-inl.h b/chromium/v8/src/codegen/s390/assembler-s390-inl.h
index dc04acec613..6c4923194ad 100644
--- a/chromium/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/chromium/v8/src/codegen/s390/assembler-s390-inl.h
@@ -153,10 +153,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/chromium/v8/src/codegen/s390/assembler-s390.cc b/chromium/v8/src/codegen/s390/assembler-s390.cc
index 511096e0db0..e799f8e8a46 100644
--- a/chromium/v8/src/codegen/s390/assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/assembler-s390.cc
@@ -440,7 +440,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
#if V8_TARGET_ARCH_S390X
diff --git a/chromium/v8/src/codegen/s390/constants-s390.h b/chromium/v8/src/codegen/s390/constants-s390.h
index b16963e52a2..23e77c93d72 100644
--- a/chromium/v8/src/codegen/s390/constants-s390.h
+++ b/chromium/v8/src/codegen/s390/constants-s390.h
@@ -1553,14 +1553,28 @@ using SixByteInstr = uint64_t;
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
V(vlbb, VLBB, 0xE707) /* type = VRX VECTOR LOAD TO BLOCK BOUNDARY */ \
+ V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
+ V(vlbrrep, VLBRREP, \
+ 0xE605) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */ \
+ V(vlebrh, VLEBRH, \
+ 0xE601) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (16) */ \
+ V(vlebrf, VLEBRF, \
+ 0xE603) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (32) */ \
+ V(vlebrg, VLEBRG, \
+ 0xE602) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (64) */ \
V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
- V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
- V(vstbr, VSTBR, 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS \
- */
+ V(vstbr, VSTBR, \
+ 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS */ \
+ V(vstebrh, VSTEBRH, \
+ 0xE609) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (16) */ \
+ V(vstebrf, VSTEBRF, \
+ 0xE60B) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (32) */ \
+ V(vstebrg, VSTEBRG, \
+ 0xE60A) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (64) */
#define S390_RIE_G_OPCODE_LIST(V) \
V(lochi, LOCHI, \
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index 4de7f2cf4bb..9b888e50dad 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -416,14 +416,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
b(cond, ip);
return;
}
@@ -474,21 +474,28 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
+ CallBuiltin(builtin);
return;
}
DCHECK(code->IsExecutable());
call(code, rmode);
}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
@@ -1184,7 +1191,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1193,7 +1199,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgebr(m, dst, double_input);
}
@@ -1208,7 +1213,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1217,7 +1221,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgdbr(m, dst, double_input);
}
@@ -1241,7 +1244,6 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1268,7 +1270,6 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1286,7 +1287,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1295,7 +1295,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1313,7 +1312,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1322,7 +1320,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgebr(m, Condition(0), result, double_input);
}
@@ -1337,7 +1334,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1346,7 +1342,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgdbr(m, Condition(0), dst, double_input);
}
@@ -1361,7 +1356,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1370,7 +1364,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1663,8 +1656,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1713,8 +1708,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1736,8 +1731,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r6, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1896,16 +1891,27 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpS64(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, value);
+ slgfi(scratch, Operand(lower_limit));
+ CmpU64(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ CmpU64(value, Operand(higher_limit));
+ }
+}
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, type_reg);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1919,14 +1925,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- if (lower_limit != 0) {
- Register scratch = r0;
- mov(scratch, value);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
- } else {
- CmpU64(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
@@ -2086,7 +2085,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
lgfi(r2, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r3);
Move(r3, ExternalReference::abort_with_reason());
@@ -2102,7 +2101,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2123,7 +2122,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -2131,7 +2130,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -4670,10 +4669,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
@@ -4797,8 +4792,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
LoadU64(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
@@ -5276,7 +5272,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
-// Opcodes without a 1-1 match.
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl, veslv, 3) \
+ V(I64x2ShrS, vesrav, 3) \
+ V(I64x2ShrU, vesrlv, 3) \
+ V(I32x4Shl, veslv, 2) \
+ V(I32x4ShrS, vesrav, 2) \
+ V(I32x4ShrU, vesrlv, 2) \
+ V(I16x8Shl, veslv, 1) \
+ V(I16x8ShrS, vesrav, 1) \
+ V(I16x8ShrU, vesrlv, 1) \
+ V(I8x16Shl, veslv, 0) \
+ V(I8x16ShrS, vesrav, 0) \
+ V(I8x16ShrU, vesrlv, 0)
+
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2) { \
+ vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
+ op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2) { \
+ mov(ip, src2); \
+ name(dst, src1, ip); \
+ }
+SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
+
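For reference, a single entry of the list above, say V(I32x4Shl, veslv, 2), expands to roughly the following pair of helpers (a sketch of the macro expansion, not additional code in the patch): the scalar shift amount is inserted into kScratchDoubleReg, replicated across all lanes, and the element-wise vector shift is applied; the Operand overload just moves the immediate into ip first.

void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                              Register src2) {
  vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(2));
  vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
  veslv(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                              const Operand& src2) {
  mov(ip, src2);
  I32x4Shl(dst, src1, ip);
}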
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
@@ -5396,6 +5422,123 @@ void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
}
+// Vector LE Load and Transform instructions.
+#ifdef V8_TARGET_BIG_ENDIAN
+#define IS_BIG_ENDIAN true
+#else
+#define IS_BIG_ENDIAN false
+#endif
+
+#define CAN_LOAD_STORE_REVERSE \
+  IS_BIG_ENDIAN && CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)
+
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, vlbrrep, LoadU64LE, 3) \
+ V(32x4, vlbrrep, LoadU32LE, 2) \
+ V(16x8, vlbrrep, LoadU16LE, 1) \
+ V(8x16, vlrep, LoadU8, 0)
+
+#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1) \
+ V(8, vleb, LoadU8, 0)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1) \
+ V(8, vsteb, StoreU8, 0)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+#undef CAN_LOAD_STORE_REVERSE
+#undef IS_BIG_ENDIAN
+
#undef kScratchDoubleReg
} // namespace internal
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.h b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
index 51cdb483263..e7c4e8994c3 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
@@ -44,6 +44,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void CallBuiltin(Builtin builtin);
void AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value, Register new_value,
int start, int end, int shift_amount, int offset,
@@ -392,6 +393,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1);
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
+ // Vector LE Load and Transform instructions.
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -999,6 +1021,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
lgfr(dst, dst);
}
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -1015,7 +1048,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);
@@ -1071,75 +1103,99 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define PROTOTYPE_SIMD_BINOP(name) \
- void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I64x2Shl, const Operand&) \
+ V(I64x2ShrS, const Operand&) \
+ V(I64x2ShrU, const Operand&) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I32x4Shl, const Operand&) \
+ V(I32x4ShrS, const Operand&) \
+ V(I32x4ShrU, const Operand&) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I16x8Shl, const Operand&) \
+ V(I16x8ShrS, const Operand&) \
+ V(I16x8ShrU, const Operand&) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register) \
+ V(I8x16Shl, const Operand&) \
+ V(I8x16ShrS, const Operand&) \
+ V(I8x16ShrU, const Operand&)
+
+#define PROTOTYPE_SIMD_BINOP(name, stype) \
+ void name(Simd128Register dst, Simd128Register src1, stype src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
@@ -1309,6 +1365,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
@@ -1416,10 +1474,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(not_smi_label /*, cr0*/);
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
diff --git a/chromium/v8/src/codegen/s390/register-s390.h b/chromium/v8/src/codegen/s390/register-s390.h
index 48accf08c5d..6e3b6a3e2b2 100644
--- a/chromium/v8/src/codegen/s390/register-s390.h
+++ b/chromium/v8/src/codegen/s390/register-s390.h
@@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;
diff --git a/chromium/v8/src/codegen/script-details.h b/chromium/v8/src/codegen/script-details.h
index a0a364c6b5c..e342e132d71 100644
--- a/chromium/v8/src/codegen/script-details.h
+++ b/chromium/v8/src/codegen/script-details.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SCRIPT_DETAILS_H_
#define V8_CODEGEN_SCRIPT_DETAILS_H_
+#include "include/v8-script.h"
#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
diff --git a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index edd1a977e69..b8210303f4c 100644
--- a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/register-arch.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@@ -15,9 +16,28 @@
#error Unsupported target architecture.
#endif
+// On IA32 an Operand can be a wrapper for a single register, in which case
+// callers should use the I8x16Splat overload that takes a Register |src|.
+#if V8_TARGET_ARCH_IA32
+#define DCHECK_OPERAND_IS_NOT_REG(op) DCHECK(!op.is_reg_only());
+#else
+#define DCHECK_OPERAND_IS_NOT_REG(op)
+#endif
+
namespace v8 {
namespace internal {
+void SharedTurboAssembler::Move(Register dst, uint32_t src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ mov(dst, Immediate(src));
+#elif V8_TARGET_ARCH_X64
+ movl(dst, Immediate(src));
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names.
if (dst != src) {
@@ -31,6 +51,17 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
}
}
+void SharedTurboAssembler::Add(Register dst, Immediate src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ add(dst, src);
+#elif V8_TARGET_ARCH_X64
+ addq(dst, src);
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
@@ -42,14 +73,29 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
-void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
+void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovapd(dst, src);
+ CpuFeatureScope scope(this, AVX);
+ vmovhps(dst, src1, src2);
} else {
- // On SSE, movaps is 1 byte shorter than movapd, and has the same
- // behavior.
- movaps(dst, src);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ movhps(dst, src2);
+ }
+}
+
+void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovlps(dst, src1, src2);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ movlps(dst, src2);
}
}
@@ -68,6 +114,7 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
uint8_t lane) {
+ ASM_CODE_COMMENT(this);
if (lane == 0) {
if (dst != src) {
Movaps(dst, src);
@@ -86,10 +133,11 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
DoubleRegister rep, uint8_t lane) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
if (lane == 0) {
- vpblendw(dst, src, rep, 0b00001111);
+ vmovsd(dst, src, rep);
} else {
vmovlhps(dst, src, rep);
}
@@ -100,15 +148,77 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
movaps(dst, src);
}
if (lane == 0) {
- pblendw(dst, rep, 0b00001111);
+ movsd(dst, rep);
} else {
movlhps(dst, rep);
}
}
}
+void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // The minps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vminps(scratch, lhs, rhs);
+ vminps(dst, rhs, lhs);
+ } else if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ minps(scratch, dst);
+ minps(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ minps(scratch, rhs);
+ movaps(dst, rhs);
+ minps(dst, lhs);
+ }
+ // Propagate -0's and NaNs, which may be non-canonical.
+ Orps(scratch, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmpunordps(dst, dst, scratch);
+ Orps(scratch, dst);
+ Psrld(dst, dst, byte{10});
+ Andnps(dst, dst, scratch);
+}
+
+void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // The maxps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(scratch, lhs, rhs);
+ vmaxps(dst, rhs, lhs);
+ } else if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ maxps(scratch, dst);
+ maxps(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ maxps(scratch, rhs);
+ movaps(dst, rhs);
+ maxps(dst, lhs);
+ }
+ // Find discrepancies.
+ Xorps(dst, scratch);
+ // Propagate NaNs, which may be non-canonical.
+ Orps(scratch, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subps(scratch, scratch, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmpunordps(dst, dst, scratch);
+ Psrld(dst, dst, byte{10});
+ Andnps(dst, dst, scratch);
+}
+
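The F32x4Min/F32x4Max comments above compress a subtle point: minps/maxps are asymmetric, returning the second operand whenever the inputs compare equal (so the sign of zero depends on operand order) or when either input is NaN. A scalar model of the min case (illustration only, assuming the documented SSE semantics):

#include <bit>
#include <cstdint>

float MinpsModel(float a, float b) { return a < b ? a : b; }  // SSE minps, one lane

uint32_t MergedMinBits(float lhs, float rhs) {
  uint32_t x = std::bit_cast<uint32_t>(MinpsModel(lhs, rhs));
  uint32_t y = std::bit_cast<uint32_t>(MinpsModel(rhs, lhs));
  // OR-ing both orders makes -0.0 win over +0.0 and keeps any NaN bits set;
  // the Cmpunordps/Psrld/Andnps tail then canonicalizes NaN lanes.
  return x | y;
}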
void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
// The minpd instruction doesn't propagate NaNs and +0's in its first
@@ -146,6 +256,7 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
// The maxpd instruction doesn't propagate NaNs and +0's in its first
@@ -184,6 +295,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
}
void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
vbroadcastss(dst, src);
@@ -202,6 +314,7 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
uint8_t lane) {
+ ASM_CODE_COMMENT(this);
DCHECK_LT(lane, 4);
// These instructions are shorter than insertps, but will leave junk in
// the top lanes of dst.
@@ -223,6 +336,7 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
+ ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movss(dst, src);
} else {
@@ -231,9 +345,202 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
}
}
+template <typename Op>
+void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ Movd(dst, src);
+ Xorps(scratch, scratch);
+ Pshufb(dst, scratch);
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(scratch, src);
+ vpbroadcastb(dst, scratch);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(dst, tmp2);
+ // Perform 16-bit shift, then mask away low bits.
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ uint8_t shift = truncate_to_int3(src2);
+ Psllw(dst, src1, byte{shift});
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, uint8_t{0});
+ Pand(dst, tmp2);
+}
+
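The constant-shift path above works around the lack of an 8-bit vector shift: it shifts whole 16-bit lanes and then clears the bits that leaked in from the neighbouring byte. A scalar model on one packed pair of bytes (illustration only, not part of the patch):

#include <cstdint>

uint16_t TwoByteShlModel(uint8_t lo, uint8_t hi, uint8_t shift) {
  shift &= 7;                                           // truncate_to_int3
  uint16_t word = static_cast<uint16_t>(hi) << 8 | lo;  // two byte lanes in one word
  word = static_cast<uint16_t>(word << shift);          // Psllw: 16-bit shift
  uint8_t bmask = static_cast<uint8_t>(0xff << shift);
  uint16_t mask = static_cast<uint16_t>(bmask) << 8 | bmask;  // per-byte mask
  word &= mask;  // drop the lo bits that spilled into the hi byte
  // (word & 0xff) == uint8_t(lo << shift) and (word >> 8) == uint8_t(hi << shift)
  return word;
}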
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK(!AreAliased(src1, tmp2, tmp3));
+
+ // Take shift value modulo 8.
+ Move(tmp1, src2);
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ // Create a mask to unset high bits.
+ Movd(tmp3, tmp1);
+ Pcmpeqd(tmp2, tmp2);
+ Psrlw(tmp2, tmp2, tmp3);
+ Packuswb(tmp2, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+ // Mask off the unwanted bits before word-shifting.
+ Pand(dst, src1, tmp2);
+ Add(tmp1, Immediate(-8));
+ Movd(tmp3, tmp1);
+ Psllw(dst, dst, tmp3);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
+ // Unpack bytes into words, do word (16-bit) shifts, and repack.
+ DCHECK_NE(dst, tmp);
+ uint8_t shift = truncate_to_int3(src2) + 8;
+
+ Punpckhbw(tmp, src1);
+ Punpcklbw(dst, src1);
+ Psraw(tmp, shift);
+ Psraw(dst, shift);
+ Packsswb(dst, tmp);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psraw(tmp2, tmp3);
+ Psraw(dst, tmp3);
+ Packsswb(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(dst, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = truncate_to_int3(src2);
+ Psrlw(dst, src1, shift);
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, byte{0});
+ Pand(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do logical shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value.
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psrlw(tmp2, tmp3);
+ Psrlw(dst, tmp3);
+ Packuswb(dst, tmp2);
+}
+
+template <typename Op>
+void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ Movd(dst, src);
+ Pshuflw(dst, dst, uint8_t{0x0});
+ Punpcklqdq(dst, dst);
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(dst, src);
+ vpbroadcastw(dst, dst);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool is_signed) {
+ ASM_CODE_COMMENT(this);
is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
Pmullw(dst, scratch);
@@ -242,6 +549,7 @@ void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpunpckhbw(scratch, src1, src1);
@@ -265,6 +573,7 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// The logic here is slightly complicated to handle all the cases of register
// aliasing. This allows flexibility for callers in TurboFan and Liftoff.
if (CpuFeatures::IsSupported(AVX)) {
@@ -313,6 +622,7 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
@@ -336,6 +646,7 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// tmp = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
@@ -358,12 +669,72 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
}
}
+void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // k = i16x8.splat(0x8000)
+ Pcmpeqd(scratch, scratch);
+ Psllw(scratch, scratch, byte{15});
+
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ Pmulhrsw(dst, src1, src2);
+ Pcmpeqw(scratch, dst);
+ Pxor(dst, scratch);
+}
+
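I16x8Q15MulRSatS leans on pmulhrsw for the rounded Q15 product; the only input pair that can overflow is -1.0 * -1.0, which pmulhrsw reports as 0x8000, and the Pcmpeqw/Pxor pair flips exactly those lanes to the saturated 0x7fff. One lane as a scalar sketch (illustration only, not part of the patch):

#include <cstdint>

int16_t Q15MulRSatSModel(int16_t a, int16_t b) {
  // pmulhrsw: rounded, truncated high half of the Q15 product.
  int32_t rounded = (static_cast<int32_t>(a) * b + (1 << 14)) >> 15;
  int16_t result = static_cast<int16_t>(rounded);
  if (result == INT16_MIN) result = INT16_MAX;  // the Pcmpeqw/Pxor fix-up
  return result;
}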
+void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (low)
+ // scratch = |0|a|0|c|0|e|0|g|
+ vpsrld(tmp, src, 16);
+ // dst = |0|b|0|d|0|f|0|h|
+ vpblendw(dst, src, tmp, 0xAA);
+ // dst = |a+b|c+d|e+f|g+h|
+ vpaddd(dst, tmp, dst);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // There is a potentially better lowering if we get rip-relative
+ // constants, see https://github.com/WebAssembly/simd/pull/380.
+ movaps(tmp, src);
+ psrld(tmp, 16);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pblendw(dst, tmp, 0xAA);
+ paddd(dst, tmp);
+ } else {
+ // src = |a|b|c|d|e|f|g|h|
+ // tmp = i32x4.splat(0x0000FFFF)
+ pcmpeqd(tmp, tmp);
+ psrld(tmp, byte{16});
+ // tmp =|0|b|0|d|0|f|0|h|
+ andps(tmp, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrld(dst, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ paddd(dst, tmp);
+ }
+}
+
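I32x4ExtAddPairwiseI16x8U produces, per 32-bit lane, the sum of the two unsigned 16-bit values packed in that lane; the three code paths differ only in how they isolate the even and odd halves. Per-lane scalar sketch (illustration only, not part of the patch):

#include <cstdint>

uint32_t ExtAddPairwiseU16LaneModel(uint32_t packed_pair) {
  uint32_t odd = packed_pair >> 16;      // vpsrld 16: upper u16, zero-extended
  uint32_t even = packed_pair & 0xFFFF;  // the blend/and keeps the lower u16
  return even + odd;                     // vpaddd
}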
// 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst.
void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpmullw(scratch, src1, src2);
@@ -380,6 +751,7 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// src = |a|b|c|d|e|f|g|h| (high)
@@ -403,6 +775,7 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// scratch = |0|0|0|0|0|0|0|0|
@@ -427,6 +800,7 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpxor(scratch, scratch, scratch);
@@ -443,6 +817,7 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
XMMRegister tmp = dst == src ? scratch : dst;
@@ -463,13 +838,22 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpcmpgtq(dst, src0, src1);
} else if (CpuFeatures::IsSupported(SSE4_2)) {
CpuFeatureScope sse_scope(this, SSE4_2);
- DCHECK_EQ(dst, src0);
- pcmpgtq(dst, src1);
+ if (dst == src0) {
+ pcmpgtq(dst, src1);
+ } else if (dst == src1) {
+ movaps(scratch, src0);
+ pcmpgtq(scratch, src1);
+ movaps(dst, scratch);
+ } else {
+ movaps(dst, src0);
+ pcmpgtq(dst, src1);
+ }
} else {
CpuFeatureScope sse_scope(this, SSE3);
DCHECK_NE(dst, src0);
@@ -488,6 +872,7 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpcmpgtq(dst, src1, src0);
@@ -522,6 +907,7 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
uint8_t shift, XMMRegister xmm_tmp) {
+ ASM_CODE_COMMENT(this);
DCHECK_GT(64, shift);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
@@ -539,7 +925,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psllq(xmm_tmp, byte{63});
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
// Add a bias of 2^63 to convert signed to unsigned.
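A scalar sketch of the bias trick used here: an arithmetic right shift is recovered from a logical one by xor-ing in 2^63 before the shift and subtracting the shifted bias afterwards (helper name illustrative):

#include <cstdint>

int64_t ArithmeticShiftRightScalar(int64_t x, unsigned shift) {  // shift < 64
  const uint64_t bias = uint64_t{1} << 63;
  uint64_t biased = static_cast<uint64_t>(x) ^ bias;  // Pxor with the 2^63 splat.
  return static_cast<int64_t>((biased >> shift) - (bias >> shift));
}
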
@@ -556,6 +942,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Register shift, XMMRegister xmm_tmp,
XMMRegister xmm_shift,
Register tmp_shift) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
DCHECK_NE(xmm_shift, dst);
@@ -572,7 +959,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Movd(xmm_shift, tmp_shift);
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
Pxor(dst, src, xmm_tmp);
@@ -581,6 +968,52 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
+void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister tmp1,
+ XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(dst, tmp1, tmp2));
+ DCHECK(!AreAliased(lhs, tmp1, tmp2));
+ DCHECK(!AreAliased(rhs, tmp1, tmp2));
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // 1. Multiply high dword of each qword of left with right.
+ vpsrlq(tmp1, lhs, byte{32});
+ vpmuludq(tmp1, tmp1, rhs);
+ // 2. Multiply high dword of each qword of right with left.
+ vpsrlq(tmp2, rhs, byte{32});
+ vpmuludq(tmp2, tmp2, lhs);
+ // 3. Add 1 and 2, then shift left by 32 (this is the high dword of result).
+ vpaddq(tmp2, tmp2, tmp1);
+ vpsllq(tmp2, tmp2, byte{32});
+ // 4. Multiply low dwords (this is the low dword of result).
+ vpmuludq(dst, lhs, rhs);
+ // 5. Add 3 and 4.
+ vpaddq(dst, dst, tmp2);
+ } else {
+    // Same algorithm as the AVX version, but with extra moves so the inputs
+    // are not overwritten.

+ movaps(tmp1, lhs);
+ movaps(tmp2, rhs);
+ psrlq(tmp1, byte{32});
+ pmuludq(tmp1, rhs);
+ psrlq(tmp2, byte{32});
+ pmuludq(tmp2, lhs);
+ paddq(tmp2, tmp1);
+ psllq(tmp2, byte{32});
+ if (dst == rhs) {
+ // pmuludq is commutative
+ pmuludq(dst, lhs);
+ } else {
+ if (dst != lhs) {
+ movaps(dst, lhs);
+ }
+ pmuludq(dst, rhs);
+ }
+ paddq(dst, tmp2);
+ }
+}
+
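The decomposition behind I64x2Mul, per 64-bit lane, as a scalar sketch (only the low 64 bits of the product are kept):

#include <cstdint>

// Steps 1-5 above: build a 64x64->64 multiply from 32x32->64 pieces.
uint64_t Mul64From32Scalar(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFF, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFF, b_hi = b >> 32;
  // The cross products only contribute to the high dword; a_hi * b_hi falls
  // entirely outside the low 64 bits and is dropped.
  uint64_t high = (a_hi * b_lo + a_lo * b_hi) << 32;
  return a_lo * b_lo + high;
}
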
// 1. Unpack src0, src1 into even-number elements of scratch.
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
@@ -588,6 +1021,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
if (low) {
@@ -617,6 +1051,7 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpunpckhqdq(dst, src, src);
@@ -635,21 +1070,28 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpxor(scratch, scratch, scratch);
vpunpckhdq(dst, src, scratch);
} else {
- if (dst != src) {
- movaps(dst, src);
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhdq(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxdq(dst, dst);
}
- xorps(scratch, scratch);
- punpckhdq(dst, scratch);
}
}
void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (dst == src) {
Pcmpeqd(scratch, scratch);
Pxor(dst, scratch);
@@ -662,6 +1104,7 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
// pandn(x, y) = !x & y, so we have to flip the mask and input.
if (CpuFeatures::IsSupported(AVX)) {
@@ -679,5 +1122,78 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
+void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine if a trap occurred in Wasm code due to an OOB load. Make
+  // sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrb(dst, scratch, src, uint8_t{0});
+ vpxor(scratch, scratch, scratch);
+ vpshufb(dst, dst, scratch);
+ } else {
+ CpuFeatureScope ssse4_scope(this, SSE4_1);
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ pinsrb(dst, src, uint8_t{0});
+ xorps(scratch, scratch);
+ pshufb(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine if a trap occurred in Wasm code due to an OOB load. Make
+  // sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrw(dst, scratch, src, uint8_t{0});
+ vpshuflw(dst, dst, uint8_t{0});
+ vpunpcklqdq(dst, dst, dst);
+ } else {
+ pinsrw(dst, src, uint8_t{0});
+ pshuflw(dst, dst, uint8_t{0});
+ movlhps(dst, dst);
+ }
+}
+
+void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+ ASM_CODE_COMMENT(this);
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine if a trap occurred in Wasm code due to an OOB load. Make
+  // sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vbroadcastss(dst, src);
+ } else {
+ movss(dst, src);
+ shufps(dst, dst, byte{0});
+ }
+}
+
+void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ ASM_CODE_COMMENT(this);
+ if (laneidx == 0) {
+ Movlps(dst, src);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst, src);
+ }
+}
+
} // namespace internal
} // namespace v8
+
+#undef DCHECK_OPERAND_IS_NOT_REG
diff --git a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 7c6f7185b9b..82c01e82925 100644
--- a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/external-reference.h"
#include "src/codegen/turbo-assembler.h"
#if V8_TARGET_ARCH_IA32
@@ -29,28 +30,62 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY
+// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
+// functions that can be shared across ia32 and x64 without any template
+// machinery, i.e. they do not require the CRTP pattern that
+// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of the
+// definitions in a separate source file, rather than putting everything
+// inside this header.
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void Move(Register dst, uint32_t src);
// Move if registers are not identical.
void Move(Register dst, Register src);
+ void Add(Register dst, Immediate src);
void And(Register dst, Immediate src);
- void Movapd(XMMRegister dst, XMMRegister src);
+ // Will move src1 to dst if AVX is not supported.
+ void Movhps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void Movlps(XMMRegister dst, XMMRegister src1, Operand src2);
- template <typename Dst, typename Src>
- void Movdqu(Dst dst, Src src) {
+ template <typename Op>
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, load_pc_offset, {SSE4_1});
+ }
+
+ template <typename Op>
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8, load_pc_offset);
+ }
+
+ // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
+ template <typename Op>
+ void Pshufb(XMMRegister dst, XMMRegister src, Op mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- vmovdqu(dst, src);
+ vpshufb(dst, src, mask);
} else {
- // movups is 1 byte shorter than movdqu. On most SSE systems, this incurs
- // no delay moving between integer and floating-point domain.
- movups(dst, src);
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK_NE(mask, dst);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, mask);
}
}
+ template <typename Op>
+ void Pshufb(XMMRegister dst, Op mask) {
+ Pshufb(dst, dst, mask);
+ }
+
// Shufps that will mov src1 into dst if AVX is not supported.
void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8);
@@ -128,6 +163,25 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
args...); \
}
+// Define a macro which uses |avx_name| when AVX is supported, and |sse_name|
+// when AVX is not supported. This is useful for bit-wise instructions like
+// andpd/andps, where the behavior is exactly the same, but the *ps
+// version is 1 byte shorter, and on SSE-only processors there is no
+// performance difference since those processors don't differentiate integer
+// and floating-point domains.
+// Note: we require |avx_name| to be the AVX instruction name without the "v"
+// prefix. If we took the full AVX instruction name and the caller
+// accidentally passed in an SSE instruction, it would compile without any
+// issues and generate the SSE instruction. By appending "v" here, we ensure
+// that we always generate the AVX instruction.
+#define AVX_OP_WITH_DIFF_SSE_INSTR(macro_name, avx_name, sse_name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this} \
+ .template emit<&Assembler::v##avx_name, &Assembler::sse_name>( \
+ dst, arg, args...); \
+ }
+
#define AVX_OP_SSE3(macro_name, name) \
template <typename Dst, typename Arg, typename... Args> \
void macro_name(Dst dst, Arg arg, Args... args) { \
@@ -163,24 +217,35 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Keep this list sorted by required extension, then instruction name.
AVX_OP(Addpd, addpd)
AVX_OP(Addps, addps)
+ AVX_OP(Addsd, addsd)
+ AVX_OP(Addss, addss)
AVX_OP(Andnpd, andnpd)
AVX_OP(Andnps, andnps)
AVX_OP(Andpd, andpd)
AVX_OP(Andps, andps)
AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmpeqps, cmpeqps)
AVX_OP(Cmplepd, cmplepd)
AVX_OP(Cmpleps, cmpleps)
AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmpltps, cmpltps)
AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpneqps, cmpneqps)
AVX_OP(Cmpunordpd, cmpunordpd)
AVX_OP(Cmpunordps, cmpunordps)
AVX_OP(Cvtdq2pd, cvtdq2pd)
AVX_OP(Cvtdq2ps, cvtdq2ps)
AVX_OP(Cvtpd2ps, cvtpd2ps)
AVX_OP(Cvtps2pd, cvtps2pd)
+ AVX_OP(Cvtsd2ss, cvtsd2ss)
+ AVX_OP(Cvtss2sd, cvtss2sd)
AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Cvttsd2si, cvttsd2si)
+ AVX_OP(Cvttss2si, cvttss2si)
AVX_OP(Divpd, divpd)
AVX_OP(Divps, divps)
+ AVX_OP(Divsd, divsd)
+ AVX_OP(Divss, divss)
AVX_OP(Maxpd, maxpd)
AVX_OP(Maxps, maxps)
AVX_OP(Minpd, minpd)
@@ -198,6 +263,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Movups, movups)
AVX_OP(Mulpd, mulpd)
AVX_OP(Mulps, mulps)
+ AVX_OP(Mulsd, mulsd)
+ AVX_OP(Mulss, mulss)
AVX_OP(Orpd, orpd)
AVX_OP(Orps, orps)
AVX_OP(Packssdw, packssdw)
@@ -207,20 +274,26 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Paddd, paddd)
AVX_OP(Paddq, paddq)
AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddsw, paddsw)
AVX_OP(Paddusb, paddusb)
AVX_OP(Paddusw, paddusw)
AVX_OP(Paddw, paddw)
- AVX_OP(Pand, pand)
AVX_OP(Pavgb, pavgb)
AVX_OP(Pavgw, pavgw)
AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpgtd, pcmpgtd)
+ AVX_OP(Pcmpgtw, pcmpgtw)
+ AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pmaddwd, pmaddwd)
+ AVX_OP(Pmaxsw, pmaxsw)
AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminsw, pminsw)
AVX_OP(Pminub, pminub)
AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Pmullw, pmullw)
AVX_OP(Pmuludq, pmuludq)
- AVX_OP(Por, por)
AVX_OP(Pshufd, pshufd)
AVX_OP(Pshufhw, pshufhw)
AVX_OP(Pshuflw, pshuflw)
@@ -236,7 +309,9 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Psubd, psubd)
AVX_OP(Psubq, psubq)
AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubsw, psubsw)
AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubusw, psubusw)
AVX_OP(Psubw, psubw)
AVX_OP(Punpckhbw, punpckhbw)
AVX_OP(Punpckhdq, punpckhdq)
@@ -246,7 +321,6 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Punpckldq, punpckldq)
AVX_OP(Punpcklqdq, punpcklqdq)
AVX_OP(Punpcklwd, punpcklwd)
- AVX_OP(Pxor, pxor)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Sqrtpd, sqrtpd)
@@ -255,10 +329,25 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Subpd, subpd)
AVX_OP(Subps, subps)
+ AVX_OP(Subsd, subsd)
+ AVX_OP(Subss, subss)
+ AVX_OP(Ucomisd, ucomisd)
+ AVX_OP(Ucomiss, ucomiss)
AVX_OP(Unpcklps, unpcklps)
AVX_OP(Xorpd, xorpd)
AVX_OP(Xorps, xorps)
+  // Many AVX processors have separate integer/floating-point domains, so use
+  // the domain-specific |avx_name| instruction when AVX is supported. On SSE,
+  // movaps is 1 byte shorter than movdqa and has the same behavior. Most SSE
+  // processors also don't incur a delay when moving between the integer and
+  // floating-point domains.
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movapd, movapd, movaps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movdqa, movdqa, movaps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movdqu, movdqu, movups)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pand, pand, andps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Por, por, orps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pxor, pxor, xorps)
+
AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSE3(Movshdup, movshdup)
@@ -267,26 +356,44 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP_SSSE3(Pabsd, pabsd)
AVX_OP_SSSE3(Pabsw, pabsw)
AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Pmulhrsw, pmulhrsw)
AVX_OP_SSSE3(Psignb, psignb)
AVX_OP_SSSE3(Psignd, psignd)
AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Insertps, insertps)
+ AVX_OP_SSE4_1(Packusdw, packusdw)
AVX_OP_SSE4_1(Pblendw, pblendw)
+ AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pmaxud, pmaxud)
+ AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pminsb, pminsb)
+ AVX_OP_SSE4_1(Pminsd, pminsd)
+ AVX_OP_SSE4_1(Pminud, pminud)
+ AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
+ AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Roundpd, roundpd)
AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundsd, roundsd)
+ AVX_OP_SSE4_1(Roundss, roundss)
+
+#undef AVX_OP
+#undef AVX_OP_SSE3
+#undef AVX_OP_SSSE3
+#undef AVX_OP_SSE4_1
+#undef AVX_OP_SSE4_2
void F64x2ExtractLane(DoubleRegister dst, XMMRegister src, uint8_t lane);
void F64x2ReplaceLane(XMMRegister dst, XMMRegister src, DoubleRegister rep,
@@ -297,7 +404,27 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
XMMRegister scratch);
void F32x4Splat(XMMRegister dst, DoubleRegister src);
void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
+ void F32x4Min(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F32x4Max(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void I8x16Splat(XMMRegister dst, Register src, XMMRegister scratch);
+ void I8x16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, uint8_t src2,
+ XMMRegister tmp);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I16x8Splat(XMMRegister dst, Register src);
+ void I16x8Splat(XMMRegister dst, Operand src);
void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scrat, bool is_signed);
void I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -307,6 +434,11 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ // Will move src1 to dst if AVX is not supported.
+ void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp);
// Requires that dst == src1 if AVX is not supported.
void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
@@ -324,6 +456,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I64x2ShrS(XMMRegister dst, XMMRegister src, Register shift,
XMMRegister xmm_tmp, XMMRegister xmm_shift,
Register tmp_shift);
+ void I64x2Mul(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister tmp1, XMMRegister tmp2);
void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
@@ -333,7 +467,443 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
+ void S128Load8Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load32Splat(XMMRegister dst, Operand src);
+ void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+
+ protected:
+ template <typename Op>
+ using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Op, uint8_t);
+ template <typename Op>
+ using NoAvxFn = void (Assembler::*)(XMMRegister, Op, uint8_t);
+
+ template <typename Op>
+ void PinsrHelper(Assembler* assm, AvxFn<Op> avx, NoAvxFn<Op> noavx,
+ XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
+ (assm->*avx)(dst, src1, src2, imm8);
+ return;
+ }
+
+ if (dst != src1) assm->movaps(dst, src1);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
+ if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*noavx)(dst, src2, imm8);
+ } else {
+ (assm->*noavx)(dst, src2, imm8);
+ }
+ }
+
+ private:
+ template <typename Op>
+ void I8x16SplatPreAvx2(XMMRegister dst, Op src, XMMRegister scratch);
+ template <typename Op>
+ void I16x8SplatPreAvx2(XMMRegister dst, Op src);
};
+
+// Common base class template shared by ia32 and x64 TurboAssembler. This uses
+// the Curiously Recurring Template Pattern (CRTP), where Impl is the concrete
+// class (the subclass of SharedTurboAssemblerBase instantiated with itself).
+// This allows static polymorphism: member functions can be moved into
+// SharedTurboAssembler, and we can also call member functions defined in the
+// ia32- or x64-specific TurboAssembler from within this template class, via
+// Impl.
+//
+// Note: all member functions must be defined in this header file so that the
+// compiler can generate code for the function definitions. See
+// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
+// If a function does not need polymorphism, move it into SharedTurboAssembler,
+// and define it outside of this header.
+template <typename Impl>
+class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
+ using SharedTurboAssembler::SharedTurboAssembler;
+
+ public:
+ void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ ExternalReference::address_of_double_abs_constant());
+ }
+
+ void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ ExternalReference::address_of_float_abs_constant());
+ }
+
+ void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ ExternalReference::address_of_double_neg_constant());
+ }
+
+ void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ ExternalReference::address_of_float_neg_constant());
+ }
+#undef FLOAT_UNOP
+
+ void Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+ if (imm8 == 0) {
+ Movd(dst, src);
+ return;
+ }
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrd(dst, src, imm8);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrd(dst, src, imm8);
+ } else {
+ DCHECK_LT(imm8, 2);
+ impl()->PextrdPreSse41(dst, src, imm8);
+ }
+ }
+
+ template <typename Op>
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1,
+ src2, imm8, load_pc_offset,
+ base::Optional<CpuFeature>(SSE4_1));
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ impl()->PinsrdPreSse41(dst, src2, imm8, load_pc_offset);
+ }
+ }
+
+ template <typename Op>
+ void Pinsrd(XMMRegister dst, Op src, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ Pinsrd(dst, dst, src, imm8, load_pc_offset);
+ }
+
+ void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+    // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
+    // 0x43300000'00000000 is a special double whose significand bits can
+    // precisely represent all uint32 values.
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
+ }
+ Unpcklps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
+ scratch));
+ Subpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), scratch));
+ }
+
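A scalar illustration of the 0x43300000 trick F64x2ConvertLowI32x4U relies on (assumes IEEE-754 doubles and little-endian byte order; the helper name is illustrative):

#include <cstdint>
#include <cstring>

// Placing a uint32 in the low 32 bits of the double 2^52 yields exactly
// 2^52 + x; subtracting 2^52 then gives x converted to double.
double Uint32ToDoubleScalar(uint32_t x) {
  uint64_t bits = (uint64_t{0x43300000} << 32) | x;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d - 4503599627370496.0;  // 2^52
}
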
+ void I32x4SConvertF32x4(XMMRegister dst, XMMRegister src, XMMRegister tmp,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_overflow_as_float(), scratch);
+
+    // This algorithm works as follows:
+    // 1. lanes with NaNs are zeroed
+    // 2. lanes >= 2147483648.0f (MAX_INT32 + 1) are set to 0xffff'ffff
+    // 3. cvttps2dq sets all out-of-range lanes to 0x8000'0000
+    //   a. correct for underflows (< MIN_INT32)
+    //   b. wrong for overflows, but we know from 2. which lanes overflowed
+    // 4. adjust for 3b by xor-ing the results of 2. and 3.
+    //   a. 0x8000'0000 xor 0xffff'ffff = 0x7fff'ffff (MAX_INT32)
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(tmp, src, src);
+ vandps(dst, src, tmp);
+ vcmpgeps(tmp, src, op);
+ vcvttps2dq(dst, dst);
+ vpxor(dst, dst, tmp);
+ } else {
+ if (src == dst) {
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ movaps(tmp, op);
+ cmpleps(tmp, dst);
+ cvttps2dq(dst, dst);
+ xorps(dst, tmp);
+ } else {
+ movaps(tmp, op);
+ cmpleps(tmp, src);
+ cvttps2dq(dst, src);
+ xorps(dst, tmp);
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ }
+ }
+ }
+
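A scalar sketch of the per-lane semantics the sequence above targets (truncating float-to-int32 conversion with saturation and NaN mapped to zero; helper name illustrative):

#include <cmath>
#include <cstdint>

int32_t TruncSatF32ToI32Scalar(float x) {
  if (std::isnan(x)) return 0;               // step 1: NaN lanes become 0.
  if (x >= 2147483648.0f) return INT32_MAX;  // steps 2+4: overflow saturates.
  if (x < -2147483648.0f) return INT32_MIN;  // step 3a: underflow saturates.
  return static_cast<int32_t>(x);            // in range: truncate toward zero.
}
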
+ void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister original_dst = dst;
+ // Make sure we don't overwrite src.
+ if (dst == src) {
+ DCHECK_NE(src, scratch);
+ dst = scratch;
+ }
+ // dst = 0 if src == NaN, else all ones.
+ vcmpeqpd(dst, src, src);
+ // dst = 0 if src == NaN, else INT32_MAX as double.
+ vandpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
+ vminpd(dst, src, dst);
+      // Values > INT32_MAX are already saturated; values < INT32_MIN raise an
+      // exception, which is masked and returns 0x80000000.
+ vcvttpd2dq(original_dst, dst);
+ } else {
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ movaps(scratch, dst);
+ cmpeqpd(scratch, dst);
+ andps(scratch,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ minpd(dst, scratch);
+ cvttpd2dq(dst, dst);
+ }
+ }
+
+ void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vxorpd(scratch, scratch, scratch);
+ // Saturate to 0.
+ vmaxpd(dst, src, scratch);
+ // Saturate to UINT32_MAX.
+ vminpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
+ // Truncate.
+ vroundpd(dst, dst, kRoundToZero);
+      // Add the special double 2^52 so the integer lands in the low
+      // significand bits.
+ vaddpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ // Extract low 32 bits of each double's significand, zero top lanes.
+ // dst = [dst[0], dst[2], 0, 0]
+ vshufps(dst, dst, scratch, 0x88);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ xorps(scratch, scratch);
+ maxpd(dst, scratch);
+ minpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(),
+ tmp));
+ roundpd(dst, dst, kRoundToZero);
+ addpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ shufps(dst, scratch, 0x88);
+ }
+ }
+
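A scalar sketch of the corresponding unsigned conversion (clamp to [0, UINT32_MAX], truncate, then add the 2^52 bias so the integer sits in the low 32 bits of the double's bit pattern); the helper and its explicit NaN check are illustrative:

#include <cmath>
#include <cstdint>
#include <cstring>

uint32_t TruncSatF64ToU32Scalar(double x) {
  if (std::isnan(x)) return 0;  // the maxpd against 0 maps NaN lanes to 0.
  double clamped = std::fmin(std::fmax(x, 0.0), 4294967295.0);
  double biased = std::trunc(clamped) + 4503599627370496.0;  // roundpd + addpd 2^52.
  uint64_t bits;
  std::memcpy(&bits, &biased, sizeof(bits));
  return static_cast<uint32_t>(bits);  // shufps keeps the low 32 bits per lane.
}
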
+ void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
+ // pmaddwd multiplies signed words in src and op, producing
+ // signed doublewords, then adds pairwise.
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ movaps(dst, src);
+ src = dst;
+ }
+
+ Pmaddwd(dst, src, op);
+ }
+
+ void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
+    // pmaddubsw treats its first operand as unsigned, so the splat-0x01
+    // constant from the external reference is passed as the first operand.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(scratch, op);
+ vpmaddubsw(dst, scratch, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst == src) {
+ movaps(scratch, op);
+ pmaddubsw(scratch, src);
+ movaps(dst, scratch);
+ } else {
+ movaps(dst, op);
+ pmaddubsw(dst, src);
+ }
+ }
+ }
+
+ void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(dst, src, op);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pmaddubsw(dst, op);
+ }
+ }
+
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
+ XMMRegister scratch, Register tmp, bool omit_add = false) {
+ ASM_CODE_COMMENT(this);
+ if (omit_add) {
+ // We have determined that the indices are immediates, and they are either
+ // within bounds, or the top bit is set, so we can omit the add.
+ Pshufb(dst, src, mask);
+ return;
+ }
+
+    // Out-of-range indices should return 0. Add 112 with unsigned saturation
+    // so that any index > 15 ends up with the top bit set (>= 128), which
+    // makes pshufb zero that lane.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpaddusb(scratch, mask, op);
+ vpshufb(dst, src, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(scratch, op);
+ if (dst != src) {
+ DCHECK_NE(dst, mask);
+ movaps(dst, src);
+ }
+ paddusb(scratch, mask);
+ pshufb(dst, scratch);
+ }
+ }
+
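A scalar sketch of why the paddusb-by-112 adjustment works per index byte (pshufb zeroes a destination byte whenever the index byte's top bit is set; helper name illustrative):

#include <cstdint>

uint8_t AdjustSwizzleIndexScalar(uint8_t index) {
  // paddusb: unsigned add with saturation at 255.
  unsigned sum = index + 112u;
  uint8_t adjusted = sum > 255 ? 255 : static_cast<uint8_t>(sum);
  // index 0..15 -> 112..127: top bit clear, pshufb selects lane (adjusted & 0x0F) == index.
  // index >= 16 -> >= 128:   top bit set, pshufb writes 0 for this lane.
  return adjusted;
}
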
+ void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
+ XMMRegister tmp2, Register scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(dst, tmp1);
+ DCHECK_NE(src, tmp1);
+ DCHECK_NE(dst, tmp2);
+ DCHECK_NE(src, tmp2);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ vpandn(tmp2, tmp1, src);
+ vpand(dst, tmp1, src);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
+ scratch));
+ vpsrlw(tmp2, tmp2, 4);
+ vpshufb(dst, tmp1, dst);
+ vpshufb(tmp2, tmp1, tmp2);
+ vpaddb(dst, dst, tmp2);
+ } else if (CpuFeatures::IsSupported(ATOM)) {
+      // Pre-Goldmont low-power Intel microarchitectures have a very slow
+      // PSHUFB instruction, so use a PSHUFB-free divide-and-conquer
+      // algorithm on these processors. The ATOM CPU feature captures exactly
+      // the right set of processors.
+ movaps(tmp1, src);
+ psrlw(tmp1, 1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ andps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x55(),
+ scratch));
+ psubb(dst, tmp1);
+ Operand splat_0x33 = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
+ movaps(tmp1, dst);
+ andps(dst, splat_0x33);
+ psrlw(tmp1, 2);
+ andps(tmp1, splat_0x33);
+ paddb(dst, tmp1);
+ movaps(tmp1, dst);
+ psrlw(dst, 4);
+ paddb(dst, tmp1);
+ andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ Operand mask = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
+ if (tmp2 != tmp1) {
+ movaps(tmp2, tmp1);
+ }
+ andps(tmp1, src);
+ andnps(tmp2, src);
+ psrlw(tmp2, 4);
+ movaps(dst, mask);
+ pshufb(dst, tmp1);
+ movaps(tmp1, mask);
+ pshufb(tmp1, tmp2);
+ paddb(dst, tmp1);
+ }
+ }
+
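A scalar sketch of the per-byte popcount the PSHUFB paths implement via two nibble lookups (the table below is the conceptual content of the popcnt mask constant; treat it as an assumption rather than the constant's literal definition):

#include <cstdint>

uint8_t PopcntByteScalar(uint8_t b) {
  static constexpr uint8_t kNibblePopcnt[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                1, 2, 2, 3, 2, 3, 3, 4};
  return kNibblePopcnt[b & 0x0F] + kNibblePopcnt[b >> 4];
}
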
+ private:
+ // All implementation-specific methods must be called through this.
+ Impl* impl() { return static_cast<Impl*>(this); }
+
+ Operand ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch) {
+ return impl()->ExternalReferenceAsOperand(reference, scratch);
+ }
+
+ using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister,
+ XMMRegister, Operand);
+ void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
+ FloatInstruction op, ExternalReference ext) {
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ movaps(dst, src);
+ src = dst;
+ }
+ SharedTurboAssembler* assm = this;
+ (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
+ }
+};
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
diff --git a/chromium/v8/src/codegen/source-position.h b/chromium/v8/src/codegen/source-position.h
index 0db12aea22c..9ec845f9073 100644
--- a/chromium/v8/src/codegen/source-position.h
+++ b/chromium/v8/src/codegen/source-position.h
@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_SOURCE_POSITION_H_
#define V8_CODEGEN_SOURCE_POSITION_H_
-#include <ostream>
+#include <iosfwd>
#include "src/base/bit-field.h"
#include "src/common/globals.h"
diff --git a/chromium/v8/src/codegen/turbo-assembler.cc b/chromium/v8/src/codegen/turbo-assembler.cc
index 59de4733ffb..09c4559813b 100644
--- a/chromium/v8/src/codegen/turbo-assembler.cc
+++ b/chromium/v8/src/codegen/turbo-assembler.cc
@@ -97,7 +97,7 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
// static
int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
- return IsolateData::builtin_slot_offset(builtin);
+ return IsolateData::BuiltinSlotOffset(builtin);
}
// static
diff --git a/chromium/v8/src/codegen/x64/assembler-x64-inl.h b/chromium/v8/src/codegen/x64/assembler-x64-inl.h
index 4d30f01c08f..851e9c2957e 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/codegen/x64/assembler-x64-inl.h
@@ -42,8 +42,7 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
if (IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
emitl(relative_target_offset(entry, reinterpret_cast<Address>(pc_)));
// We must ensure that `emitl` is not growing the assembler buffer
// and falling back to off-heap compilation.
@@ -66,8 +65,7 @@ void Assembler::emit(Immediate64 x) {
if (x.rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
int offset = pc_offset();
Handle<HeapObject> object(reinterpret_cast<Address*>(x.value_));
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, x.value_));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, x.value_);
emitq(static_cast<uint64_t>(object->ptr()));
DCHECK(EmbeddedObjectMatches(offset, object));
return;
@@ -344,12 +342,12 @@ HeapObject RelocInfo::target_object() {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
DCHECK(!HAS_SMI_TAG(compressed));
- Object obj(DecompressTaggedPointer(isolate, compressed));
+ Object obj(DecompressTaggedPointer(cage_base, compressed));
return HeapObject::cast(obj);
}
DCHECK(IsFullEmbeddedObject(rmode_) || IsDataEmbeddedObject(rmode_));
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.cc b/chromium/v8/src/codegen/x64/assembler-x64.cc
index 1e66311d952..1c5723c5a33 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/assembler-x64.cc
@@ -3347,26 +3347,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3601,6 +3581,22 @@ void Assembler::vmovdqa(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::vmovdqa(YMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, ymm0, src, kL256, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vmovdqa(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, ymm0, src, kL256, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vmovdqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3625,10 +3621,34 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(YMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, ymm0, src, kL256, kF3, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Operand dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, ymm0, dst, kL256, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+void Assembler::vmovdqu(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, ymm0, dst, kL256, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(0x12);
emit_sse_operand(dst, src2);
}
@@ -3636,7 +3656,7 @@ void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vmovlps(Operand dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNoPrefix, k0F, kWIG);
emit(0x13);
emit_sse_operand(src, dst);
}
@@ -3644,7 +3664,7 @@ void Assembler::vmovlps(Operand dst, XMMRegister src) {
void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(0x16);
emit_sse_operand(dst, src2);
}
@@ -3652,7 +3672,7 @@ void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vmovhps(Operand dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNoPrefix, k0F, kWIG);
emit(0x17);
emit_sse_operand(src, dst);
}
@@ -3668,6 +3688,17 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vinstr(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
+ DCHECK(feature == AVX || feature == AVX2);
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, pp, m, w);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
@@ -3679,11 +3710,31 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
emit_sse_operand(dst, src2);
}
+void Assembler::vinstr(byte op, YMMRegister dst, YMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
+ DCHECK(feature == AVX || feature == AVX2);
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, pp, m, w);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3691,7 +3742,15 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3700,7 +3759,7 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, byte imm8) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
emit(imm8);
@@ -3726,7 +3785,7 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, xmm0, src, kLIG, kNoPrefix, k0F, kWIG);
emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3734,7 +3793,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
void Assembler::vucomiss(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, xmm0, src, kLIG, kNoPrefix, k0F, kWIG);
emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3768,7 +3827,7 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW1);
emit(op);
emit_modrm(reg, rm);
}
@@ -3776,7 +3835,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW1);
emit(op);
emit_operand(reg, rm);
}
@@ -3784,7 +3843,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW0);
emit(op);
emit_modrm(reg, rm);
}
@@ -3792,7 +3851,7 @@ void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
void Assembler::bmi1l(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW0);
emit(op);
emit_operand(reg, rm);
}
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.h b/chromium/v8/src/codegen/x64/assembler-x64.h
index c3d3af100ba..fa85f2eedca 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/assembler-x64.h
@@ -235,6 +235,7 @@ class V8_EXPORT_PRIVATE Operand {
}
Operand(const Operand&) V8_NOEXCEPT = default;
+ Operand& operator=(const Operand&) V8_NOEXCEPT = default;
const Data& data() const { return data_; }
@@ -485,7 +486,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
// VEX prefix encodings.
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
@@ -507,45 +508,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
-#define DECLARE_INSTRUCTION(instruction) \
- template <class P1> \
- void instruction##_tagged(P1 p1) { \
- emit_##instruction(p1, kTaggedSize); \
- } \
- \
- template <class P1> \
- void instruction##l(P1 p1) { \
- emit_##instruction(p1, kInt32Size); \
- } \
- \
- template <class P1> \
- void instruction##q(P1 p1) { \
- emit_##instruction(p1, kInt64Size); \
- } \
- \
- template <class P1, class P2> \
- void instruction##_tagged(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kTaggedSize); \
- } \
- \
- template <class P1, class P2> \
- void instruction##l(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt32Size); \
- } \
- \
- template <class P1, class P2> \
- void instruction##q(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt64Size); \
- } \
- \
- template <class P1, class P2, class P3> \
- void instruction##l(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt32Size); \
- } \
- \
- template <class P1, class P2, class P3> \
- void instruction##q(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt64Size); \
+#define DECLARE_INSTRUCTION(instruction) \
+ template <typename... Ps> \
+ void instruction##_tagged(Ps... ps) { \
+ emit_##instruction(ps..., kTaggedSize); \
+ } \
+ \
+ template <typename... Ps> \
+ void instruction##l(Ps... ps) { \
+ emit_##instruction(ps..., kInt32Size); \
+ } \
+ \
+ template <typename... Ps> \
+ void instruction##q(Ps... ps) { \
+ emit_##instruction(ps..., kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
@@ -963,8 +939,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature = AVX);
+ void vinstr(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature = AVX2);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature = AVX);
+ void vinstr(byte op, YMMRegister dst, YMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature = AVX2);
// SSE instructions
void sse_instr(XMMRegister dst, XMMRegister src, byte escape, byte opcode);
@@ -1241,9 +1223,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, Operand src);
-
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
@@ -1256,14 +1235,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pmovmskb(Register dst, XMMRegister src);
+ void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
+
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
void pextrq(Register dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, uint8_t imm8);
void pinsrb(XMMRegister dst, Operand src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrd(XMMRegister dst, Register src, uint8_t imm8);
void pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrq(XMMRegister dst, Register src, uint8_t imm8);
@@ -1351,9 +1331,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
void vmovdqa(XMMRegister dst, Operand src);
void vmovdqa(XMMRegister dst, XMMRegister src);
+ void vmovdqa(YMMRegister dst, Operand src);
+ void vmovdqa(YMMRegister dst, YMMRegister src);
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
void vmovdqu(XMMRegister dst, XMMRegister src);
+ void vmovdqu(YMMRegister dst, Operand src);
+ void vmovdqu(Operand dst, YMMRegister src);
+ void vmovdqu(YMMRegister dst, YMMRegister src);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
void vmovlps(Operand dst, XMMRegister src);
@@ -1367,6 +1352,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, Operand src2) { \
vps(0x##opcode, dst, xmm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, Operand src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
}
SSE_UNOP_INSTRUCTION_LIST(AVX_SSE_UNOP)
#undef AVX_SSE_UNOP
@@ -1377,19 +1368,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
+ vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, Operand src2) { \
+ vps(0x##opcode, dst, src1, src2); \
}
SSE_BINOP_INSTRUCTION_LIST(AVX_SSE_BINOP)
#undef AVX_SSE_BINOP
-#define AVX_3(instr, opcode, impl) \
- void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- impl(opcode, dst, src1, src2); \
- } \
- void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
- impl(opcode, dst, src1, src2); \
+#define AVX_3(instr, opcode, impl, SIMDRegister) \
+ void instr(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ impl(opcode, dst, src1, src2); \
}
- AVX_3(vhaddps, 0x7c, vsd)
+ AVX_3(vhaddps, 0x7c, vsd, XMMRegister)
+ AVX_3(vhaddps, 0x7c, vsd, YMMRegister)
#define AVX_SCALAR(instr, prefix, escape, opcode) \
void v##instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
@@ -1414,20 +1412,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef AVX_SSE2_SHIFT_IMM
void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1509,16 +1501,22 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundps(YMMRegister dst, YMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
-
- void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
+ void vroundpd(YMMRegister dst, YMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
+
+ template <typename Reg, typename Op>
+ void vsd(byte op, Reg dst, Reg src1, Op src2) {
+ vinstr(op, dst, src1, src2, kF2, k0F, kWIG, AVX);
}
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1590,6 +1588,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX_CMP_P(vcmpneq, 0x4)
AVX_CMP_P(vcmpnlt, 0x5)
AVX_CMP_P(vcmpnle, 0x6)
+ AVX_CMP_P(vcmpge, 0xd)
#undef AVX_CMP_P
@@ -1651,24 +1650,48 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
+ void vpshufd(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufd(XMMRegister dst, Operand src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
+ void vpshufd(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
}
+ void vpshuflw(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
+ emit(imm8);
+ }
void vpshuflw(XMMRegister dst, Operand src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
}
+ void vpshuflw(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
emit(imm8);
}
+ void vpshufhw(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufhw(XMMRegister dst, Operand src, uint8_t imm8) {
- vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
+ vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpshufhw(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
emit(imm8);
}
@@ -1677,23 +1700,43 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
emit(mask);
}
+ void vpblendw(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ uint8_t mask) {
+ vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(mask);
+ }
void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask) {
vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
emit(mask);
}
+ void vpblendw(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t mask) {
+ vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(mask);
+ }
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8) {
vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
+ void vpalignr(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ uint8_t imm8) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(imm8);
+ }
void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
+ void vpalignr(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t imm8) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(imm8);
+ }
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
@@ -1765,16 +1808,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void popcntl(Register dst, Operand src);
void bzhiq(Register dst, Register src1, Register src2) {
- bmi2q(kNone, 0xf5, dst, src2, src1);
+ bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhiq(Register dst, Operand src1, Register src2) {
- bmi2q(kNone, 0xf5, dst, src2, src1);
+ bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Register src1, Register src2) {
- bmi2l(kNone, 0xf5, dst, src2, src1);
+ bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Operand src1, Register src2) {
- bmi2l(kNone, 0xf5, dst, src2, src1);
+ bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
}
void mulxq(Register dst1, Register dst2, Register src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
diff --git a/chromium/v8/src/codegen/x64/cpu-x64.cc b/chromium/v8/src/codegen/x64/cpu-x64.cc
index cce76d8c6a0..7fd3635683e 100644
--- a/chromium/v8/src/codegen/x64/cpu-x64.cc
+++ b/chromium/v8/src/codegen/x64/cpu-x64.cc
@@ -4,7 +4,7 @@
// CPU specific code for x64 independent of OS goes here.
-#if defined(__GNUC__) && !defined(__MINGW64__)
+#if defined(__GNUC__) && !defined(__MINGW64__) && !defined(GOOGLE3)
#include "src/third_party/valgrind/valgrind.h"
#endif
diff --git a/chromium/v8/src/codegen/x64/fma-instr.h b/chromium/v8/src/codegen/x64/fma-instr.h
index f41c91ee512..c607429e33c 100644
--- a/chromium/v8/src/codegen/x64/fma-instr.h
+++ b/chromium/v8/src/codegen/x64/fma-instr.h
@@ -30,9 +30,17 @@
V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf) \
+ V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
+ V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
+ V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
+ V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc) \
+ V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
+ V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
+ V(vfnmadd132pd, L128, 66, 0F, 38, W1, 9c) \
+ V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
#endif // V8_CODEGEN_X64_FMA_INSTR_H_
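For reference, the 132/213/231 suffixes added to this list differ only in which input the destination register supplies. In the assembler's calling form instr(dst, src1, src2) they compute

  vfmadd132*: dst = dst  * src2 + src1
  vfmadd213*: dst = src1 * dst  + src2
  vfmadd231*: dst = src1 * src2 + dst

and the vfnmadd* variants negate the product before the add. Having all three orderings is what lets the QFMA/QFMS helpers in macro-assembler-x64.cc below avoid a register move no matter which source dst aliases.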
diff --git a/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index 50ba12b836f..fade1eda99f 100644
--- a/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
-constexpr auto TSANRelaxedStoreDescriptor::registers() {
+constexpr auto TSANStoreDescriptor::registers() {
return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}
// static
-constexpr auto TSANRelaxedLoadDescriptor::registers() {
+constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif // V8_IS_TSAN
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
index 5a8dc356b8f..f7a50f786bf 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -294,6 +294,17 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
}
}
+void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
+ Register value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(kScratchRegister, value);
+ xchgl(kScratchRegister, dst_field_operand);
+ } else {
+ movq(kScratchRegister, value);
+ xchgq(kScratchRegister, dst_field_operand);
+ }
+}
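The xchg in AtomicStoreTaggedField carries an implicit LOCK prefix, so the store is sequentially consistent rather than merely release-ordered; the value is first copied into kScratchRegister because xchg also writes back the old memory contents and would otherwise clobber value. A minimal sketch of the same idea at the C++ level (not V8 code; compilers on x86-64 typically lower a seq_cst store to exactly this xchg pattern):

  #include <atomic>

  // A plain MOV store only gives release ordering on x86-64; a sequentially
  // consistent store needs a full barrier, which XCHG provides through its
  // implicit LOCK prefix.
  void SeqCstStore(std::atomic<int>* slot, int value) {
    slot->store(value, std::memory_order_seq_cst);  // usually emitted as: xchg [slot], value
  }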
+
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
@@ -483,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
-void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode) {
+void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode,
+ std::memory_order order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
- TSANRelaxedStoreDescriptor descriptor;
+ TSANStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
Register value_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
- // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+ // Prepare argument registers for calling GetTSANStoreStub.
MovePair(address_parameter, address, value_parameter, value);
if (isolate()) {
- Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+ Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
@@ -520,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
- auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
+ auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -531,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
- TSANRelaxedLoadDescriptor descriptor;
+ TSANLoadDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
@@ -672,7 +684,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Move(arg_reg_1, static_cast<int>(reason));
PrepareCallCFunction(1);
LoadAddress(rax, ExternalReference::abort_with_reason());
@@ -685,7 +697,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -847,29 +859,108 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
- // See comments in Movdqa(XMMRegister, XMMRegister).
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(dst, src);
- } else {
- movaps(dst, src);
- }
-}
-
-void TurboAssembler::Movdqa(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- // Many AVX processors have separate integer/floating-point domains. Use the
- // appropriate instructions.
- vmovdqa(dst, src);
+ vpextrq(dst, src, imm8);
} else {
- // On SSE, movaps is 1 byte shorter than movdqa, and has the same behavior.
- // Most SSE processors also don't have the same delay moving between integer
- // and floating-point domains.
- movaps(dst, src);
- }
-}
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrq(dst, src, imm8);
+ }
+}
+
+// Helper macro to define qfma macro-assembler. This takes care of every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMA(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vadd##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ if (dst == src1) { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ add##ps_or_pd(dst, tmp); \
+ } else if (dst == src2) { \
+ DCHECK_NE(src2, src1); \
+ mul##ps_or_pd(src2, src3); \
+ add##ps_or_pd(src2, src1); \
+ } else if (dst == src3) { \
+ DCHECK_NE(src3, src1); \
+ mul##ps_or_pd(src3, src2); \
+ add##ps_or_pd(src3, src1); \
+ } else { \
+ movaps(dst, src2); \
+ mul##ps_or_pd(dst, src3); \
+ add##ps_or_pd(dst, src1); \
+ } \
+ }
+
+// Helper macro to define qfms macro-assembler. This takes care of every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMS(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfnmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfnmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vsub##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ sub##ps_or_pd(dst, tmp); \
+ }
+
+void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(ps)
+}
+
+void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(ps)
+}
+
+void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(pd);
+}
+
+void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(pd);
+}
+
+#undef QFMOP
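Each branch of QFMA picks the FMA form whose fixed operand role matches the register dst already aliases, so no path needs more than one move and every path computes src1 + src2 * src3 (QFMS does the same with the vfnmadd forms, giving src1 - src2 * src3). A standalone scalar model of the four cases (a sketch, not V8 code):

  #include <cassert>

  int main() {
    float s1 = 1.5f, s2 = 2.0f, s3 = 3.0f;
    float want = s1 + s2 * s3;  // 7.5f, exactly representable
    float d;
    d = s1; d = s2 * s3 + d; assert(d == want);  // dst aliases src1: vfmadd231(dst, s2, s3)
    d = s2; d = d * s3 + s1; assert(d == want);  // dst aliases src2: vfmadd132(dst, s1, s3)
    d = s3; d = s2 * d + s1; assert(d == want);  // dst aliases src3: vfmadd213(dst, s2, s1)
    d = s1; d = s2 * s3 + d; assert(d == want);  // no alias: movups dst, s1 then vfmadd231
    return 0;
  }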
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -1265,6 +1356,16 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
}
}
+void TurboAssembler::SmiToInt32(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ sarl(reg, Immediate(kSmiShift));
+ } else {
+ shrq(reg, Immediate(kSmiShift));
+ }
+}
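SmiToInt32 handles the two Smi layouts: with pointer compression a 31-bit value is tagged in the low 32 bits, so an arithmetic 32-bit shift recovers the signed value; without compression the 32-bit value sits in the upper half of the word, so a logical 64-bit shift by 32 is used. A minimal scalar model, assuming the usual kSmiShift values of 1 and 32 (a sketch, not V8 code):

  #include <cassert>
  #include <cstdint>

  // Compressed-pointer layout: value << 1 in the low 32 bits, tag bit 0 == 0.
  int32_t SmiToInt32Compressed(uint32_t tagged) {
    return static_cast<int32_t>(tagged) >> 1;  // models: sarl reg, 1
  }

  // Full-pointer layout: value stored in the upper 32 bits of the word.
  int32_t SmiToInt32Full(uint64_t tagged) {
    return static_cast<int32_t>(tagged >> 32);  // models: shrq reg, 32
  }

  int main() {
    assert(SmiToInt32Compressed(static_cast<uint32_t>(-7) << 1) == -7);
    assert(SmiToInt32Full(static_cast<uint64_t>(42) << 32) == 42);
    return 0;
  }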
+
void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
@@ -1544,23 +1645,19 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
+ if (high == low) {
+ Move(dst, low);
+ Punpcklqdq(dst, dst);
+ return;
+ }
+
Move(dst, low);
movq(kScratchRegister, high);
- Pinsrq(dst, kScratchRegister, uint8_t{1});
+ Pinsrq(dst, dst, kScratchRegister, uint8_t{1});
}
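The new high == low fast path exploits punpcklqdq dst, dst, which duplicates the low quadword into both 64-bit lanes. For example, Move(dst, 0x4010000000000000, 0x4010000000000000) now materializes the f64x2 splat of 4.0 with a single constant load plus one shuffle, instead of two constant loads and a pinsrq.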
// ----------------------------------------------------------------------------
-void MacroAssembler::Absps(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_abs_constant()));
-}
-
-void MacroAssembler::Negps(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_neg_constant()));
-}
-
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
@@ -1579,15 +1676,22 @@ void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
}
}
-void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit, Label* on_in_range,
- Label::Distance near_jump) {
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
if (lower_limit != 0) {
leal(kScratchRegister, Operand(value, 0u - lower_limit));
cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
} else {
cmpl(value, Immediate(higher_limit));
}
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range,
+ Label::Distance near_jump) {
+ CompareRange(value, lower_limit, higher_limit);
j(below_equal, on_in_range, near_jump);
}
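CompareRange uses the classic single-comparison range check: value lies in [lower_limit, higher_limit] exactly when value - lower_limit, computed with unsigned wrap-around, is <= higher_limit - lower_limit. The leal computes that difference into the scratch register, the single cmpl sets the flags, and below_equal (CF or ZF) then signals in-range. A standalone model of the trick (a sketch, not V8 code):

  #include <cassert>

  bool InRange(unsigned value, unsigned lower, unsigned upper) {
    // Values below `lower` wrap around to very large numbers and fail the test.
    return (value - lower) <= (upper - lower);
  }

  int main() {
    assert(InRange(5, 3, 9));
    assert(!InRange(2, 3, 9));   // 2 - 3 wraps to 0xFFFFFFFF
    assert(!InRange(10, 3, 9));
    return 0;
  }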
@@ -1820,8 +1924,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
- return Operand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin));
+ return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
@@ -1993,566 +2096,52 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
}
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- Move(kScratchRegister, destination, rmode);
- RetpolineCall(kScratchRegister);
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+ uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrd(dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrd(dst, src, imm8);
- return;
- }
DCHECK_EQ(1, imm8);
movq(dst, src);
shrq(dst, Immediate(32));
}
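PextrdPreSse41 serves only the paths that cannot rely on SSE4.1's pextrd, so it supports lanes 0 and 1: lane 0 is a plain movd, and lane 1 is read by copying the whole low quadword into a general-purpose register and shifting the upper 32 bits down. A scalar model of the fallback (a sketch, not V8 code):

  #include <cassert>
  #include <cstdint>

  uint32_t ExtractLanePreSse41(uint64_t low_quadword, int imm8) {
    if (imm8 == 0) return static_cast<uint32_t>(low_quadword);  // models: movd
    return static_cast<uint32_t>(low_quadword >> 32);           // models: movq + shrq 32
  }

  int main() {
    uint64_t q = 0x1111222233334444ull;
    assert(ExtractLanePreSse41(q, 0) == 0x33334444u);
    assert(ExtractLanePreSse41(q, 1) == 0x11112222u);
    return 0;
  }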
namespace {
-
-template <typename Src>
-using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Src, uint8_t);
-template <typename Src>
-using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
-
-template <typename Src>
-void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
- XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
- base::Optional<CpuFeature> feature = base::nullopt) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, src1, src2, imm8);
- return;
- }
-
- if (dst != src1) {
- assm->movaps(dst, src1);
- }
- if (feature.has_value()) {
- DCHECK(CpuFeatures::IsSupported(*feature));
- CpuFeatureScope scope(assm, *feature);
- (assm->*noavx)(dst, src2, imm8);
- } else {
- (assm->*noavx)(dst, src2, imm8);
- }
-}
-} // namespace
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
- PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
-}
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
- PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
- PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
- PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
- // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
- // only by Wasm SIMD, which requires SSE4_1 already.
- if (CpuFeatures::IsSupported(SSE4_1)) {
- PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
- return;
- }
-
- Movd(kScratchDoubleReg, src2);
- if (imm8 == 1) {
- punpckldq(dst, kScratchDoubleReg);
- } else {
- DCHECK_EQ(0, imm8);
- Movss(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
- // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
- // only by Wasm SIMD, which requires SSE4_1 already.
- if (CpuFeatures::IsSupported(SSE4_1)) {
- PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
- return;
- }
-
- Movd(kScratchDoubleReg, src2);
+template <typename Op>
+void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src,
+ uint8_t imm8, uint32_t* load_pc_offset) {
+ tasm->Movd(kScratchDoubleReg, src);
+ if (load_pc_offset) *load_pc_offset = tasm->pc_offset();
if (imm8 == 1) {
- punpckldq(dst, kScratchDoubleReg);
+ tasm->punpckldq(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(0, imm8);
- Movss(dst, kScratchDoubleReg);
+ tasm->Movss(dst, kScratchDoubleReg);
}
}
+} // namespace
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
-}
-
-void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpblendvb(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- pblendvb(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvps(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvps(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvpd(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvpd(dst, src2);
- }
-}
-
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
- XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpshufb(dst, src, mask);
- } else {
- // Make sure these are different so that we won't overwrite mask.
- DCHECK_NE(dst, mask);
- if (dst != src) {
- movaps(dst, src);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, mask);
- }
-}
-
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- Movdqa(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- // k = i16x8.splat(0x8000)
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllw(kScratchDoubleReg, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(kScratchDoubleReg, dst);
- Pxor(dst, kScratchDoubleReg);
-}
-
-void TurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
- if (laneidx == 0) {
- Movlps(dst, src);
- } else {
- DCHECK_EQ(1, laneidx);
- Movhps(dst, src);
- }
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- DCHECK_NE(dst, tmp);
- DCHECK_NE(src, tmp);
- DCHECK_NE(kScratchDoubleReg, tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- vpandn(kScratchDoubleReg, tmp, src);
- vpand(dst, tmp, src);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask()));
- vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 4);
- vpshufb(dst, tmp, dst);
- vpshufb(kScratchDoubleReg, tmp, kScratchDoubleReg);
- vpaddb(dst, dst, kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp, src);
- psrlw(tmp, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55()));
- psubb(dst, tmp);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33());
- movaps(tmp, dst);
- andps(dst, splat_0x33);
- psrlw(tmp, 2);
- andps(tmp, splat_0x33);
- paddb(dst, tmp);
- movaps(tmp, dst);
- psrlw(dst, 4);
- paddb(dst, tmp);
- andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- } else {
- movaps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask());
- Move(kScratchDoubleReg, tmp);
- andps(tmp, src);
- andnps(kScratchDoubleReg, src);
- psrlw(kScratchDoubleReg, 4);
- movaps(dst, mask);
- pshufb(dst, tmp);
- movaps(tmp, mask);
- pshufb(tmp, kScratchDoubleReg);
- paddb(dst, tmp);
- }
-}
-
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src) {
- // dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
- // 0x43300000'00000000 is a special double where the significand bits
- // precisely represents all uint32 numbers.
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask()));
- Subpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
-}
-
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(src, kScratchDoubleReg);
- dst = kScratchDoubleReg;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
- if (original_dst != dst) {
- Move(original_dst, dst);
- }
- } else {
- if (dst != src) {
- Move(dst, src);
- }
- Move(kScratchDoubleReg, dst);
- cmpeqpd(kScratchDoubleReg, dst);
- andps(kScratchDoubleReg,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- minpd(dst, kScratchDoubleReg);
- cvttpd2dq(dst, dst);
- }
-}
-
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- // Saturate to 0.
- vmaxpd(dst, src, kScratchDoubleReg);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significant bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, kScratchDoubleReg, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- Move(dst, src);
- }
- xorps(kScratchDoubleReg, kScratchDoubleReg);
- maxpd(dst, kScratchDoubleReg);
- minpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- shufps(dst, kScratchDoubleReg, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
- XMMRegister src) {
- // pmaddubsw treats the first operand as unsigned, so the external reference
- // to be passed to it as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- if (dst == src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(kScratchDoubleReg, op);
- vpmaddubsw(dst, kScratchDoubleReg, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- pmaddubsw(kScratchDoubleReg, src);
- movaps(dst, kScratchDoubleReg);
- }
- } else {
- Movdqa(dst, op);
- Pmaddubsw(dst, dst, src);
- }
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
- XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // scratch = |0|a|0|c|0|e|0|g|
- vpsrld(kScratchDoubleReg, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, kScratchDoubleReg, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, kScratchDoubleReg, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(kScratchDoubleReg, src);
- psrld(kScratchDoubleReg, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, kScratchDoubleReg, 0xAA);
- paddd(dst, kScratchDoubleReg);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- andps(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, bool omit_add) {
- if (omit_add) {
- // We have determined that the indices are immediates, and they are either
- // within bounds, or the top bit is set, so we can omit the add.
- Pshufb(dst, src, mask);
- return;
- }
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(kScratchDoubleReg, mask, op);
- vpshufb(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(kScratchDoubleReg, mask);
- pshufb(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::Abspd(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_abs_constant()));
-}
-
-void TurboAssembler::Negpd(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_neg_constant()));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Lzcntl(Register dst, Register src) {
@@ -2749,12 +2338,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
void MacroAssembler::CmpInstanceTypeRange(Register map,
+ Register instance_type_out,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- movzxwl(kScratchRegister, FieldOperand(map, Map::kInstanceTypeOffset));
- leal(kScratchRegister, Operand(kScratchRegister, 0u - lower_limit));
- cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
+ movzxwl(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
+ CompareRange(instance_type_out, lower_limit, higher_limit);
}
void TurboAssembler::AssertNotSmi(Register object) {
@@ -2794,8 +2383,7 @@ void MacroAssembler::AssertCodeT(Register object) {
Check(not_equal, AbortReason::kOperandIsNotACodeT);
Push(object);
LoadMap(object, object);
- CmpInstanceType(object, V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE
- : CODE_TYPE);
+ CmpInstanceType(object, CODET_TYPE);
Pop(object);
Check(equal, AbortReason::kOperandIsNotACodeT);
}
@@ -2820,7 +2408,8 @@ void MacroAssembler::AssertFunction(Register object) {
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
- CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ CmpInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
@@ -3047,8 +2636,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kFar);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -3067,8 +2658,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
leaq(kScratchRegister,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words =
+ type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
+ if (!kJSArgcIncludesReceiver) extra_words++;
+
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
@@ -3097,8 +2692,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
@@ -3109,7 +2704,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -3197,7 +2793,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
bind(&check_offset);
cmpq(bytes_scratch, Immediate(kStackPageSize));
- j(greater, &touch_next_page);
+ j(greater_equal, &touch_next_page);
subq(rsp, bytes_scratch);
}
@@ -3205,7 +2801,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
- while (bytes > kStackPageSize) {
+ while (bytes >= kStackPageSize) {
subq(rsp, Immediate(kStackPageSize));
movb(Operand(rsp, 0), Immediate(0));
bytes -= kStackPageSize;
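The change from > to >= closes a probing gap. Assuming a 4 KiB kStackPageSize and a request of exactly 8192 bytes, and with the remainder subtracted after the loop as in the register variant above: the old loop touched one page, left bytes == 4096, and then subtracted the remaining full page without writing to it, so that page was never probed; with >= every page-sized step both moves rsp and touches the new page before the (now sub-page) remainder is subtracted.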
@@ -3523,11 +3119,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- // TODO(turbofan): Perhaps, we want to put an lfence here.
- Move(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.h b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
index 02b9eb410ec..cf3981a2555 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
@@ -57,61 +57,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
- AVX_OP(Subsd, subsd)
- AVX_OP(Divss, divss)
- AVX_OP(Divsd, divsd)
- AVX_OP(Pcmpgtw, pcmpgtw)
- AVX_OP(Pmaxsw, pmaxsw)
- AVX_OP(Pminsw, pminsw)
- AVX_OP(Addss, addss)
- AVX_OP(Addsd, addsd)
- AVX_OP(Mulsd, mulsd)
- AVX_OP(Cmpeqps, cmpeqps)
- AVX_OP(Cmpltps, cmpltps)
- AVX_OP(Cmpneqps, cmpneqps)
- AVX_OP(Cmpnltps, cmpnltps)
- AVX_OP(Cmpnleps, cmpnleps)
- AVX_OP(Cmpnltpd, cmpnltpd)
- AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Cvttpd2dq, cvttpd2dq)
- AVX_OP(Ucomiss, ucomiss)
- AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Psubsw, psubsw)
- AVX_OP(Psubusw, psubusw)
- AVX_OP(Paddsw, paddsw)
- AVX_OP(Pcmpgtd, pcmpgtd)
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Movlhps, movlhps)
- AVX_OP_SSSE3(Phaddd, phaddd)
- AVX_OP_SSSE3(Phaddw, phaddw)
- AVX_OP_SSSE3(Pshufb, pshufb)
- AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
- AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Pminsd, pminsd)
- AVX_OP_SSE4_1(Pminuw, pminuw)
- AVX_OP_SSE4_1(Pminud, pminud)
- AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
- AVX_OP_SSE4_1(Pmaxud, pmaxud)
- AVX_OP_SSE4_1(Pmulld, pmulld)
- AVX_OP_SSE4_1(Insertps, insertps)
- AVX_OP_SSE4_1(Pinsrq, pinsrq)
- AVX_OP_SSE4_1(Pextrq, pextrq)
- AVX_OP_SSE4_1(Roundss, roundss)
- AVX_OP_SSE4_1(Roundsd, roundsd)
- AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
-
-#undef AVX_OP
-
- // Define movq here instead of using AVX_OP. movq is defined using templates
- // and there is a function template `void movq(P1)`, while technically
- // impossible, will be selected when deducing the arguments for AvxHelper.
- void Movq(XMMRegister dst, Register src);
- void Movq(Register dst, XMMRegister src);
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -170,8 +119,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- void Movdqa(XMMRegister dst, Operand src);
- void Movdqa(XMMRegister dst, XMMRegister src);
+ // Define movq here instead of using AVX_OP. movq is defined using templates
+ // and there is a function template `void movq(P1)`, while technically
+ // impossible, will be selected when deducing the arguments for AvxHelper.
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, Operand src);
@@ -212,6 +164,28 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, Operand src);
+ void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
+ void Pextrq(Register dst, XMMRegister src, int8_t imm8);
+
+ void PinsrdPreSse41(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void PinsrdPreSse41(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
@@ -378,6 +352,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
+ // Convert smi to 32-bit value.
+ void SmiToInt32(Register reg);
+
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
@@ -432,17 +409,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc = always);
- void RetpolineJump(Register reg);
-
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -450,59 +422,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Trap();
void DebugBreak();
- // Will move src1 to dst if dst != src1.
- void Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddwd(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
-
- void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
-
- void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
-
- // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
- void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on x64. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
-
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp);
-
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src);
-
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src);
-
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- bool omit_add = false);
-
- void Abspd(XMMRegister dst);
- void Negpd(XMMRegister dst);
-
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -595,9 +514,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
- void CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode);
+ void CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size, StubCallMode mode,
+ std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif // V8_IS_TSAN
@@ -632,8 +551,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -676,6 +593,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
+ void AtomicStoreTaggedField(Operand dst_field_operand, Register value);
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
@@ -832,7 +750,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Cmp(Operand dst, Handle<Object> source);
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags CF=1 or ZF=1 indicate the value is in the range
+ // (condition below_equal).
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range,
Label::Distance near_jump = Label::kFar);
@@ -851,10 +772,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pop(Operand dst);
void PopQuad(Operand dst);
- // ---------------------------------------------------------------------------
- // SIMD macros.
- void Absps(XMMRegister dst);
- void Negps(XMMRegister dst);
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(Address entry);
@@ -870,7 +787,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare instance type ranges for a map (low and high inclusive)
// Always use unsigned comparisons: below_equal for a positive result.
- void CmpInstanceTypeRange(Register map, InstanceType low, InstanceType high);
+ void CmpInstanceTypeRange(Register map, Register instance_type_out,
+ InstanceType low, InstanceType high);
template <typename Field>
void DecodeField(Register reg) {
diff --git a/chromium/v8/src/codegen/x64/register-x64.h b/chromium/v8/src/codegen/x64/register-x64.h
index 61e7ccf396a..f36763f2e48 100644
--- a/chromium/v8/src/codegen/x64/register-x64.h
+++ b/chromium/v8/src/codegen/x64/register-x64.h
@@ -155,6 +155,24 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
+#define YMM_REGISTERS(V) \
+ V(ymm0) \
+ V(ymm1) \
+ V(ymm2) \
+ V(ymm3) \
+ V(ymm4) \
+ V(ymm5) \
+ V(ymm6) \
+ V(ymm7) \
+ V(ymm8) \
+ V(ymm9) \
+ V(ymm10) \
+ V(ymm11) \
+ V(ymm12) \
+ V(ymm13) \
+ V(ymm14) \
+ V(ymm15)
+
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
// No argument padding required.
@@ -171,6 +189,17 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum YMMRegisterCode {
+#define REGISTER_CODE(R) kYMMCode_##R,
+ YMM_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kYMMAfterLast
+};
+static_assert(static_cast<int>(kDoubleAfterLast) ==
+ static_cast<int>(kYMMAfterLast),
+ "The number of XMM register codes must match the number of YMM "
+ "register codes");
+
class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
public:
// Return the high bit of the register code as a 0 or 1. Used often
@@ -180,7 +209,7 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
// in modR/M, SIB, and opcode bytes.
int low_bits() const { return code() & 0x7; }
- private:
+ protected:
friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
@@ -189,6 +218,22 @@ ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
static_assert(sizeof(XMMRegister) == sizeof(int),
"XMMRegister can efficiently be passed by value");
+class YMMRegister : public XMMRegister {
+ public:
+ static constexpr YMMRegister from_code(int code) {
+ DCHECK(base::IsInRange(code, 0, XMMRegister::kNumRegisters - 1));
+ return YMMRegister(code);
+ }
+
+ private:
+ friend class XMMRegister;
+ explicit constexpr YMMRegister(int code) : XMMRegister(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(YMMRegister);
+static_assert(sizeof(YMMRegister) == sizeof(int),
+ "YMMRegister can efficiently be passed by value");
+
using FloatRegister = XMMRegister;
using DoubleRegister = XMMRegister;
@@ -201,9 +246,15 @@ DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+#define DECLARE_REGISTER(R) \
+ constexpr YMMRegister R = YMMRegister::from_code(kYMMCode_##R);
+YMM_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(YMMRegister, YMM_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = rax;
@@ -212,7 +263,6 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;
diff --git a/chromium/v8/src/codegen/x64/sse-instr.h b/chromium/v8/src/codegen/x64/sse-instr.h
index 452cc0f6901..d1223b69a17 100644
--- a/chromium/v8/src/codegen/x64/sse-instr.h
+++ b/chromium/v8/src/codegen/x64/sse-instr.h
@@ -32,6 +32,7 @@
V(sqrtss, F3, 0F, 51) \
V(addss, F3, 0F, 58) \
V(mulss, F3, 0F, 59) \
+ V(cvtss2sd, F3, 0F, 5A) \
V(subss, F3, 0F, 5C) \
V(minss, F3, 0F, 5D) \
V(divss, F3, 0F, 5E) \
diff --git a/chromium/v8/src/common/globals.h b/chromium/v8/src/common/globals.h
index 6aee59eb83f..795a6cc8267 100644
--- a/chromium/v8/src/common/globals.h
+++ b/chromium/v8/src/common/globals.h
@@ -62,6 +62,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -587,9 +590,14 @@ constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
constexpr intptr_t kDoubleAlignment = 8;
constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
+// Desired alignment for generated code is 64 bytes on x64 (to allow 64-bytes
+// loop header alignment) and 32 bytes (to improve cache line utilization) on
+// other architectures.
+#if V8_TARGET_ARCH_X64
+constexpr int kCodeAlignmentBits = 6;
+#else
constexpr int kCodeAlignmentBits = 5;
+#endif
constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
@@ -869,7 +877,7 @@ enum MinimumCapacity {
USE_CUSTOM_MINIMUM_CAPACITY
};
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
+enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum class CompactionSpaceKind {
kNone,
@@ -1701,20 +1709,6 @@ enum IsolateAddressId {
kIsolateAddressCount
};
-enum class PoisoningMitigationLevel {
- kPoisonAll,
- kDontPoison,
- kPoisonCriticalOnly
-};
-
-enum class LoadSensitivity {
- kCritical, // Critical loads are poisoned whenever we can run untrusted
- // code (i.e., when --untrusted-code-mitigations is on).
- kUnsafe, // Unsafe loads are poisoned when full poisoning is on
- // (--branch-load-poisoning).
- kSafe // Safe loads are never poisoned.
-};
-
// The reason for a WebAssembly trap.
#define FOREACH_WASM_TRAPREASON(V) \
V(TrapUnreachable) \
@@ -1785,7 +1779,20 @@ constexpr int kSwissNameDictionaryInitialCapacity = 4;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
-static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#ifdef V8_INCLUDE_RECEIVER_IN_ARGC
+constexpr bool kJSArgcIncludesReceiver = true;
+constexpr int kJSArgcReceiverSlots = 1;
+constexpr uint16_t kDontAdaptArgumentsSentinel = 0;
+#else
+constexpr bool kJSArgcIncludesReceiver = false;
+constexpr int kJSArgcReceiverSlots = 0;
+constexpr uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#endif
+
+// Helper to get the parameter count for functions with JS linkage.
+inline constexpr int JSParameterCount(int param_count_without_receiver) {
+ return param_count_without_receiver + kJSArgcReceiverSlots;
+}
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
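
A minimal sketch of how the new argc convention composes, assuming the constants and the JSParameterCount helper from the globals.h hunk above are in scope (the two-parameter function and the kDemo* names are hypothetical):

  // With V8_INCLUDE_RECEIVER_IN_ARGC defined, kJSArgcReceiverSlots is 1 and
  // the receiver is counted in argc; otherwise it contributes nothing.
  constexpr int kDemoParamsWithoutReceiver = 2;  // hypothetical f(a, b)
  constexpr int kDemoArgc = JSParameterCount(kDemoParamsWithoutReceiver);
  static_assert(kDemoArgc == kDemoParamsWithoutReceiver + kJSArgcReceiverSlots,
                "argc is the declared parameter count plus the receiver slot");
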
diff --git a/chromium/v8/src/common/message-template.h b/chromium/v8/src/common/message-template.h
index 89ef319db1f..a925300c5c4 100644
--- a/chromium/v8/src/common/message-template.h
+++ b/chromium/v8/src/common/message-template.h
@@ -380,6 +380,7 @@ namespace internal {
T(TypedArrayTooLargeToSort, \
"Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
+ T(CollectionGrowFailed, "% maximum size exceeded") \
/* SyntaxError */ \
T(AmbiguousExport, \
"The requested module '%' contains conflicting star exports for name '%'") \
@@ -439,6 +440,10 @@ namespace internal {
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateBrand, "Object must be an instance of class %") \
+ T(InvalidPrivateBrandReinitialization, \
+ "Cannot initialize private methods of class % twice on the same object") \
+ T(InvalidPrivateFieldReitialization, \
+ "Cannot initialize % twice on the same object") \
T(InvalidPrivateFieldResolution, \
"Private field '%' must be declared in an enclosing class") \
T(InvalidPrivateMemberRead, \
diff --git a/chromium/v8/src/compiler-dispatcher/OWNERS b/chromium/v8/src/compiler-dispatcher/OWNERS
index f08a5493851..84cd0368eba 100644
--- a/chromium/v8/src/compiler-dispatcher/OWNERS
+++ b/chromium/v8/src/compiler-dispatcher/OWNERS
@@ -1,4 +1,3 @@
jkummerow@chromium.org
leszeks@chromium.org
-rmcilroy@chromium.org
victorgomes@chromium.org
diff --git a/chromium/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
index e57463d404a..03b2fe55121 100644
--- a/chromium/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
+++ b/chromium/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
@@ -8,7 +8,7 @@
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
#include "src/flags/flags.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
diff --git a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index f8a7fa88144..45f3684fb6e 100644
--- a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -173,7 +173,6 @@ void OptimizingCompileDispatcher::AwaitCompileTasks() {
void OptimizingCompileDispatcher::FlushQueues(
BlockingBehavior blocking_behavior, bool restore_function_code) {
- if (FLAG_block_concurrent_recompilation) Unblock();
FlushInputQueue();
if (blocking_behavior == BlockingBehavior::kBlock) {
base::MutexGuard lock_guard(&ref_count_mutex_);
@@ -231,7 +230,7 @@ bool OptimizingCompileDispatcher::HasJobs() {
// Note: This relies on {output_queue_} being mutated by a background thread
// only when {ref_count_} is not zero. Also, {ref_count_} is never incremented
// by a background thread.
- return ref_count_ != 0 || !output_queue_.empty() || blocked_jobs_ != 0;
+ return ref_count_ != 0 || !output_queue_.empty();
}
void OptimizingCompileDispatcher::QueueForOptimization(
@@ -244,20 +243,8 @@ void OptimizingCompileDispatcher::QueueForOptimization(
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
}
- if (FLAG_block_concurrent_recompilation) {
- blocked_jobs_++;
- } else {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- }
-}
-
-void OptimizingCompileDispatcher::Unblock() {
- while (blocked_jobs_ > 0) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- blocked_jobs_--;
- }
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ std::make_unique<CompileTask>(isolate_, this));
}
} // namespace internal
diff --git a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 56592ed9b44..ccfb4f2a4a2 100644
--- a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -30,7 +30,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
- blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
@@ -42,7 +41,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void Flush(BlockingBehavior blocking_behavior);
// Takes ownership of |job|.
void QueueForOptimization(OptimizedCompilationJob* job);
- void Unblock();
void AwaitCompileTasks();
void InstallOptimizedFunctions();
@@ -99,8 +97,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- int blocked_jobs_;
-
std::atomic<int> ref_count_;
base::Mutex ref_count_mutex_;
base::ConditionVariable ref_count_zero_;
diff --git a/chromium/v8/src/compiler/OWNERS b/chromium/v8/src/compiler/OWNERS
index 1626bc54876..a415cbfa669 100644
--- a/chromium/v8/src/compiler/OWNERS
+++ b/chromium/v8/src/compiler/OWNERS
@@ -4,7 +4,6 @@ mvstanton@chromium.org
neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
-solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc
index 675371df57a..fda0727dd1c 100644
--- a/chromium/v8/src/compiler/access-builder.cc
+++ b/chromium/v8/src/compiler/access-builder.cc
@@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -410,26 +410,22 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
- FieldAccess access = {kTaggedBase,
- JSTypedArray::kExternalPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier,
- LoadSensitivity::kCritical,
- ConstFieldInfo::None(),
- false,
-#ifdef V8_HEAP_SANDBOX
- kTypedArrayExternalPointerTag
-#endif
+ FieldAccess access = {
+ kTaggedBase,
+ JSTypedArray::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ ConstFieldInfo::None(),
+ false,
};
return access;
}
@@ -441,16 +437,11 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
-#ifdef V8_HEAP_SANDBOX
- kDataViewDataPointerTag,
-#endif
};
return access;
}
@@ -756,7 +747,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@@ -902,10 +892,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {kTaggedBase, Cell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -966,11 +956,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
}
// statics
-ElementAccess AccessBuilder::ForFixedArrayElement(
- ElementsKind kind, LoadSensitivity load_sensitivity) {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, load_sensitivity};
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
@@ -1038,59 +1026,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() {
}
// static
-ElementAccess AccessBuilder::ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity) {
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+ bool is_external) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
case kExternalBigInt64Array:
@@ -1239,15 +1218,6 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackCellValue() {
- FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
FieldAccess access = {kTaggedBase,
FeedbackCell::kInterruptBudgetOffset,
diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h
index fa68628cf80..99ffde19c48 100644
--- a/chromium/v8/src/compiler/access-builder.h
+++ b/chromium/v8/src/compiler/access-builder.h
@@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
- static ElementAccess ForFixedArrayElement(
- ElementsKind kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to SloppyArgumentsElements elements.
static ElementAccess ForSloppyArgumentsElementsMappedEntry();
@@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
- static ElementAccess ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+ bool is_external);
// Provides access to HashTable fields.
static FieldAccess ForHashTableBaseNumberOfElements();
@@ -342,7 +339,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryObjectHashIndex();
// Provides access to FeedbackCell fields.
- static FieldAccess ForFeedbackCellValue();
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector fields.
diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc
index 21f453f4d87..2ad2c9e945d 100644
--- a/chromium/v8/src/compiler/access-info.cc
+++ b/chromium/v8/src/compiler/access-info.cc
@@ -8,7 +8,6 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
@@ -57,7 +56,8 @@ bool HasFieldRepresentationDependenciesOnMap(
ZoneVector<CompilationDependency const*>& dependencies,
Handle<Map> const& field_owner_map) {
for (auto dep : dependencies) {
- if (dep->IsFieldRepresentationDependencyOnMap(field_owner_map)) {
+ if (CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ dep, field_owner_map)) {
return true;
}
}
@@ -109,6 +109,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
DCHECK_IMPLIES(
field_representation.IsDouble(),
HasFieldRepresentationDependenciesOnMap(
@@ -129,6 +130,7 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
field_owner_map, field_map, {{receiver_map}, zone},
@@ -384,7 +386,7 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const {
- if (!CanInlineElementAccess(map)) return {};
+ if (!map.CanInlineElementAccess()) return {};
return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
}
@@ -542,7 +544,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
Handle<Cell> cell = broker->CanonicalPersistentHandle(
Cell::cast(module_namespace->module().exports().Lookup(
isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
- if (cell->value().IsTheHole(isolate)) {
+ if (cell->value(kRelaxedLoad).IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
@@ -834,7 +836,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// occurring before a fast mode holder on the chain.
return Invalid();
}
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
return ComputeDataFieldAccessInfo(receiver_map, map, holder, index,
access_mode);
@@ -844,7 +846,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return Invalid();
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
holder, index, access_mode);
@@ -1050,7 +1052,7 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
if (!map.has_value()) return {};
if (map->instance_type() != instance_type ||
- !CanInlineElementAccess(*map)) {
+ !map->CanInlineElementAccess()) {
return {};
}
if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
@@ -1128,10 +1130,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (details.IsReadOnly()) return Invalid();
// TODO(bmeurer): Handle transition to data constant?
- if (details.location() != kField) return Invalid();
+ if (details.location() != PropertyLocation::kField) return Invalid();
int const index = details.field_index();
Representation details_representation = details.representation();
+ if (details_representation.IsNone()) return Invalid();
+
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
@@ -1168,8 +1172,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (descriptors_field_type->IsClass()) {
unrecorded_dependencies.push_back(
dependencies()->FieldTypeDependencyOffTheRecord(
- transition_map, number,
- MakeRef<Object>(broker(), descriptors_field_type)));
+ transition_map, number, *descriptors_field_type_ref));
// Remember the field map, and try to infer a useful type.
base::Optional<MapRef> maybe_field_map =
TryMakeRef(broker(), descriptors_field_type->AsClass());
diff --git a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
index 29c7897ec9e..b70c641db88 100644
--- a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
- }
-}
-
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i,
- Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
- switch (AddressingModeField::decode(opcode)) {
- case kMode_Offset_RI:
- codegen->tasm()->mov(address, i.InputImmediate(1));
- codegen->tasm()->add(address, address, i.InputRegister(0));
- break;
- case kMode_Offset_RR:
- codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
- break;
- default:
- UNREACHABLE();
- }
- codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -360,12 +329,11 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ dmb(ISH); \
- __ asm_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(0), i.InputOffset(1)); \
+ if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
@@ -691,25 +659,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
- __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
- __ csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -818,8 +767,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -904,13 +854,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -977,15 +927,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode;
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ } else {
+ mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
+ }
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
+
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
+ __ dmb(ISH);
+ }
if (addressing_mode == kMode_Offset_RI) {
int32_t immediate = i.InputInt32(1);
offset = Operand(immediate);
@@ -996,6 +955,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(reg);
__ str(value, MemOperand(object, reg));
}
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
+ AtomicMemoryOrderField::decode(instr->opcode()) ==
+ AtomicMemoryOrder::kSeqCst) {
+ __ dmb(ISH);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -1619,12 +1584,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1632,11 +1595,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1644,22 +1605,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputFloatRegister(), address, 0);
- } else {
- __ vldr(i.OutputFloatRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1688,15 +1640,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputDoubleRegister(), address, 0);
- } else {
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1832,10 +1776,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
case kArmVmullLow: {
auto dt = static_cast<NeonDataType>(MiscField::decode(instr->opcode()));
__ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(),
@@ -3373,94 +3313,97 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src_list, i.InputUint8(1), i.NeonInputOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
@@ -3597,20 +3540,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
- FlagsConditionToCondition(condition));
- __ csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3805,7 +3734,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3955,12 +3883,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// DropArguments().
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
- __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC,
+ lt);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
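
The barrier placement that ASSEMBLE_ATOMIC_LOAD_INTEGER and the reworked ASSEMBLE_ATOMIC_STORE_INTEGER encode follows the usual C++11-to-ARMv7 mapping (https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html). A minimal, self-contained C++ illustration; g_cell and demo() are hypothetical:

  #include <atomic>
  #include <cstdint>

  std::atomic<int32_t> g_cell{0};

  void demo() {
    // Acquire and seq-cst loads both lower to: LDR; DMB ISH.
    int32_t v = g_cell.load(std::memory_order_acquire);
    // Release store: DMB ISH; STR (no trailing barrier).
    g_cell.store(v + 1, std::memory_order_release);
    // Seq-cst store: DMB ISH; STR; DMB ISH -- the trailing barrier is the
    // `if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH);` case above.
    g_cell.store(v + 2, std::memory_order_seq_cst);
  }
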
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
index c0200917b96..d4e0c2c4578 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -11,357 +11,362 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmRev) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVmullLow) \
- V(ArmVmullHigh) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmVcnt) \
- V(ArmVpadal) \
- V(ArmVpaddl) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDmbIsh) \
- V(ArmDsbIsb) \
- V(ArmF64x2Splat) \
- V(ArmF64x2ExtractLane) \
- V(ArmF64x2ReplaceLane) \
- V(ArmF64x2Abs) \
- V(ArmF64x2Neg) \
- V(ArmF64x2Sqrt) \
- V(ArmF64x2Add) \
- V(ArmF64x2Sub) \
- V(ArmF64x2Mul) \
- V(ArmF64x2Div) \
- V(ArmF64x2Min) \
- V(ArmF64x2Max) \
- V(ArmF64x2Eq) \
- V(ArmF64x2Ne) \
- V(ArmF64x2Lt) \
- V(ArmF64x2Le) \
- V(ArmF64x2Pmin) \
- V(ArmF64x2Pmax) \
- V(ArmF64x2Ceil) \
- V(ArmF64x2Floor) \
- V(ArmF64x2Trunc) \
- V(ArmF64x2NearestInt) \
- V(ArmF64x2ConvertLowI32x4S) \
- V(ArmF64x2ConvertLowI32x4U) \
- V(ArmF64x2PromoteLowF32x4) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4Sqrt) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Div) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmF32x4Pmin) \
- V(ArmF32x4Pmax) \
- V(ArmF32x4DemoteF64x2Zero) \
- V(ArmI64x2SplatI32Pair) \
- V(ArmI64x2ReplaceLaneI32Pair) \
- V(ArmI64x2Abs) \
- V(ArmI64x2Neg) \
- V(ArmI64x2Shl) \
- V(ArmI64x2ShrS) \
- V(ArmI64x2Add) \
- V(ArmI64x2Sub) \
- V(ArmI64x2Mul) \
- V(ArmI64x2ShrU) \
- V(ArmI64x2BitMask) \
- V(ArmI64x2Eq) \
- V(ArmI64x2Ne) \
- V(ArmI64x2GtS) \
- V(ArmI64x2GeS) \
- V(ArmI64x2SConvertI32x4Low) \
- V(ArmI64x2SConvertI32x4High) \
- V(ArmI64x2UConvertI32x4Low) \
- V(ArmI64x2UConvertI32x4High) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI32x4Abs) \
- V(ArmI32x4BitMask) \
- V(ArmI32x4DotI16x8S) \
- V(ArmI32x4TruncSatF64x2SZero) \
- V(ArmI32x4TruncSatF64x2UZero) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLaneS) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSatS) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSatS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8ExtractLaneU) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSatU) \
- V(ArmI16x8SubSatU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI16x8RoundingAverageU) \
- V(ArmI16x8Abs) \
- V(ArmI16x8BitMask) \
- V(ArmI16x8Q15MulRSatS) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLaneS) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSatS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSatS) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ExtractLaneU) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSatU) \
- V(ArmI8x16SubSatU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmI8x16RoundingAverageU) \
- V(ArmI8x16Abs) \
- V(ArmI8x16BitMask) \
- V(ArmS128Const) \
- V(ArmS128Zero) \
- V(ArmS128AllOnes) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS128AndNot) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmI8x16Swizzle) \
- V(ArmI8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmI64x2AllTrue) \
- V(ArmI32x4AllTrue) \
- V(ArmI16x8AllTrue) \
- V(ArmV128AnyTrue) \
- V(ArmI8x16AllTrue) \
- V(ArmS128Load8Splat) \
- V(ArmS128Load16Splat) \
- V(ArmS128Load32Splat) \
- V(ArmS128Load64Splat) \
- V(ArmS128Load8x8S) \
- V(ArmS128Load8x8U) \
- V(ArmS128Load16x4S) \
- V(ArmS128Load16x4U) \
- V(ArmS128Load32x2S) \
- V(ArmS128Load32x2U) \
- V(ArmS128Load32Zero) \
- V(ArmS128Load64Zero) \
- V(ArmS128LoadLaneLow) \
- V(ArmS128LoadLaneHigh) \
- V(ArmS128StoreLaneLow) \
- V(ArmS128StoreLaneHigh) \
- V(ArmWord32AtomicPairLoad) \
- V(ArmWord32AtomicPairStore) \
- V(ArmWord32AtomicPairAdd) \
- V(ArmWord32AtomicPairSub) \
- V(ArmWord32AtomicPairAnd) \
- V(ArmWord32AtomicPairOr) \
- V(ArmWord32AtomicPairXor) \
- V(ArmWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVmullLow) \
+ V(ArmVmullHigh) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmVcnt) \
+ V(ArmVpadal) \
+ V(ArmVpaddl) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDmbIsh) \
+ V(ArmDsbIsb) \
+ V(ArmF64x2Splat) \
+ V(ArmF64x2ExtractLane) \
+ V(ArmF64x2ReplaceLane) \
+ V(ArmF64x2Abs) \
+ V(ArmF64x2Neg) \
+ V(ArmF64x2Sqrt) \
+ V(ArmF64x2Add) \
+ V(ArmF64x2Sub) \
+ V(ArmF64x2Mul) \
+ V(ArmF64x2Div) \
+ V(ArmF64x2Min) \
+ V(ArmF64x2Max) \
+ V(ArmF64x2Eq) \
+ V(ArmF64x2Ne) \
+ V(ArmF64x2Lt) \
+ V(ArmF64x2Le) \
+ V(ArmF64x2Pmin) \
+ V(ArmF64x2Pmax) \
+ V(ArmF64x2Ceil) \
+ V(ArmF64x2Floor) \
+ V(ArmF64x2Trunc) \
+ V(ArmF64x2NearestInt) \
+ V(ArmF64x2ConvertLowI32x4S) \
+ V(ArmF64x2ConvertLowI32x4U) \
+ V(ArmF64x2PromoteLowF32x4) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4Sqrt) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Div) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmF32x4Pmin) \
+ V(ArmF32x4Pmax) \
+ V(ArmF32x4DemoteF64x2Zero) \
+ V(ArmI64x2SplatI32Pair) \
+ V(ArmI64x2ReplaceLaneI32Pair) \
+ V(ArmI64x2Abs) \
+ V(ArmI64x2Neg) \
+ V(ArmI64x2Shl) \
+ V(ArmI64x2ShrS) \
+ V(ArmI64x2Add) \
+ V(ArmI64x2Sub) \
+ V(ArmI64x2Mul) \
+ V(ArmI64x2ShrU) \
+ V(ArmI64x2BitMask) \
+ V(ArmI64x2Eq) \
+ V(ArmI64x2Ne) \
+ V(ArmI64x2GtS) \
+ V(ArmI64x2GeS) \
+ V(ArmI64x2SConvertI32x4Low) \
+ V(ArmI64x2SConvertI32x4High) \
+ V(ArmI64x2UConvertI32x4Low) \
+ V(ArmI64x2UConvertI32x4High) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI32x4Abs) \
+ V(ArmI32x4BitMask) \
+ V(ArmI32x4DotI16x8S) \
+ V(ArmI32x4TruncSatF64x2SZero) \
+ V(ArmI32x4TruncSatF64x2UZero) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLaneS) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSatS) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSatS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8ExtractLaneU) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSatU) \
+ V(ArmI16x8SubSatU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI16x8RoundingAverageU) \
+ V(ArmI16x8Abs) \
+ V(ArmI16x8BitMask) \
+ V(ArmI16x8Q15MulRSatS) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLaneS) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSatS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSatS) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ExtractLaneU) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSatU) \
+ V(ArmI8x16SubSatU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmI8x16RoundingAverageU) \
+ V(ArmI8x16Abs) \
+ V(ArmI8x16BitMask) \
+ V(ArmS128Const) \
+ V(ArmS128Zero) \
+ V(ArmS128AllOnes) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS128AndNot) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmI8x16Swizzle) \
+ V(ArmI8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmI64x2AllTrue) \
+ V(ArmI32x4AllTrue) \
+ V(ArmI16x8AllTrue) \
+ V(ArmV128AnyTrue) \
+ V(ArmI8x16AllTrue) \
+ V(ArmS128Load8Splat) \
+ V(ArmS128Load16Splat) \
+ V(ArmS128Load32Splat) \
+ V(ArmS128Load64Splat) \
+ V(ArmS128Load8x8S) \
+ V(ArmS128Load8x8U) \
+ V(ArmS128Load16x4S) \
+ V(ArmS128Load16x4U) \
+ V(ArmS128Load32x2S) \
+ V(ArmS128Load32x2U) \
+ V(ArmS128Load32Zero) \
+ V(ArmS128Load64Zero) \
+ V(ArmS128LoadLaneLow) \
+ V(ArmS128LoadLaneHigh) \
+ V(ArmS128StoreLaneLow) \
+ V(ArmS128StoreLaneHigh) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
V(ArmWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
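
TARGET_ARCH_OPCODE_LIST above (now prefixed by an empty TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST) is an X-macro: each V(Name) entry is expanded once per use of the list. A minimal standalone sketch of the pattern, with hypothetical Demo names:

  // An empty "with memory access mode" sub-list contributes nothing.
  #define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)  // None.

  #define DEMO_OPCODE_LIST(V)                    \
    DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)  \
    V(DemoAdd)                                   \
    V(DemoSub)

  enum DemoArchOpcode {
  #define DECLARE_OPCODE(Name) k##Name,
    DEMO_OPCODE_LIST(DECLARE_OPCODE)
  #undef DECLARE_OPCODE
  };
  // Expands to: enum DemoArchOpcode { kDemoAdd, kDemoSub, };
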
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 2698d45ae7f..d0511ae62b1 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -430,17 +430,18 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
if (g.CanBeImmediate(index, opcode)) {
inputs[input_count++] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_Offset_RI);
- } else if ((opcode == kArmStr) &&
+ } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
&inputs[3])) {
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- if (opcode == kArmVst1S128) {
+ if (arch_opcode == kArmVst1S128) {
// Inputs are value, base, index, only care about base and index.
EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
} else {
@@ -497,9 +498,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitStoreLane(Node* node) {
@@ -630,29 +631,69 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- ArmOperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return kArmVstrF32;
+ case MachineRepresentation::kFloat64:
+ return kArmVstrF64;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kArmStrb;
+ case MachineRepresentation::kWord16:
+ return kArmStrh;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kArmStr;
+ case MachineRepresentation::kSimd128:
+ return kArmVst1S128;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+}
+
+ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
@@ -678,58 +719,44 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code;
+ if (!atomic_order) {
+ code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= AtomicMemoryOrderField::encode(*atomic_order);
+ code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
+ }
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs);
+ selector->Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kArmVstrF32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kArmVstrF64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kArmStrb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArmStrh;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kArmStr;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kArmVst1S128;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
+ if (!atomic_order) {
+ opcode = GetStoreOpcode(rep);
+ } else {
+ // Release stores emit DMB ISH; STR while sequentially consistent stores
+ // emit DMB ISH; STR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ opcode = GetAtomicStoreOpcode(rep);
+ opcode |= AtomicMemoryOrderField::encode(*atomic_order);
}
ExternalReferenceMatcher m(base);
if (m.HasResolvedValue() &&
- CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.ResolvedValue());
+ selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
- Emit(opcode, 0, nullptr, input_count, inputs);
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
return;
}
}
@@ -738,10 +765,17 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = g.UseRegister(base);
- EmitStore(this, opcode, input_count, inputs, index);
+ EmitStore(selector, opcode, input_count, inputs, index);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2236,22 +2270,27 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2261,34 +2300,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -2299,15 +2313,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2334,15 +2348,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2399,12 +2413,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c1213834269..d04bcf245c7 100644
--- a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -235,7 +235,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
- break;
}
UNREACHABLE();
}
@@ -460,47 +459,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
- : kSpeculationPoisonRegister.W();
- codegen->tasm()->And(value, value, Operand(poison));
- }
-}
-
-void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
- Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- AddressingMode address_mode = AddressingModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
- UseScratchRegisterScope temps(codegen->tasm());
- Register address = temps.AcquireX();
- switch (address_mode) {
- case kMode_MRI: // Fall through.
- case kMode_MRR:
- codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
- break;
- case kMode_Operand2_R_LSL_I:
- codegen->tasm()->Add(address, i->InputRegister(0),
- i->InputOperand2_64(1));
- break;
- default:
- // Note: we don't need poisoning for kMode_Root loads as those loads
- // target a fixed offset from root register which is set once when
- // initializing the vm.
- UNREACHABLE();
- }
- codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
- codegen->tasm()->Ldr(output_reg, MemOperand(address));
- } else {
- codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
- }
-}
-
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
@@ -714,29 +672,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ Csetm(kSpeculationPoisonRegister, eq);
- __ Csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, sp);
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(scratch, scratch, kSpeculationPoisonRegister);
- __ Mov(sp, scratch);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -886,7 +821,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -897,10 +833,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, 0);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, 0);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ Bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -936,16 +872,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK_EQ(i.InputRegister(0), x1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
- __ Debug("kArchAbortCSAAssert", 0, BREAK);
+ __ Debug("kArchAbortCSADcheck", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -1034,6 +970,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bind(ool->exit());
break;
}
+ case kArchAtomicStoreWithWriteBarrier: {
+ DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1232,6 +1187,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).Format(src_f));
break;
}
+ case kArm64ISplat: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register src = LaneSizeField::decode(opcode) == 64 ? i.InputRegister64(0)
+ : i.InputRegister32(0);
+ __ Dup(i.OutputSimd128Register().Format(f), src);
+ break;
+ }
+ case kArm64FSplat: {
+ VectorFormat src_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(src_f);
+ __ Dup(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), 0);
+ break;
+ }
+ case kArm64Smlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Smlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Smull: {
if (instr->InputAt(0)->IsRegister()) {
__ Smull(i.OutputRegister(), i.InputRegister32(0),
@@ -1254,6 +1242,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(src_f));
break;
}
+ case kArm64Umlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Umlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Umull: {
if (instr->InputAt(0)->IsRegister()) {
__ Umull(i.OutputRegister(), i.InputRegister32(0),
@@ -1551,6 +1557,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt32: {
+ __ PopcntHelper(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64Cnt64: {
+ __ PopcntHelper(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ }
case kArm64Cnt: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
@@ -1814,12 +1828,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrsbW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1832,12 +1844,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrshW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1850,12 +1860,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrsw:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1864,19 +1872,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldr:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdarDecompressTaggedSigned:
+ __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressTaggedPointer:
+ __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressAnyTagged:
+ __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1885,9 +1901,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StlrCompressTagged:
+ // To be consistent with other STLR instructions, the value is stored at
+ // the 3rd input register instead of the 1st.
+ __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
+ __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1895,7 +1917,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1916,117 +1938,100 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
break;
- case kWord32AtomicLoadWord32:
- case kArm64Word64AtomicLoadUint32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
break;
case kArm64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
break;
- case kWord32AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
break;
- case kWord32AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
break;
- case kWord32AtomicStoreWord32:
- case kArm64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
break;
case kArm64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
break;
- case kWord32AtomicExchangeWord32:
- case kArm64Word64AtomicExchangeUint32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
break;
- case kWord32AtomicCompareExchangeWord32:
- case kArm64Word64AtomicCompareExchangeUint32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
- case kArm64Word64Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
- case kArm64Word64Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
break; \
- case kWord32Atomic##op##Word32: \
- case kArm64Word64Atomic##op##Uint32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
break; \
case kArm64Word64Atomic##op##Uint64: \
@@ -2052,12 +2057,49 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
+#define SIMD_UNOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ break; \
+ }
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT(), \
i.InputSimd128Register(1).V##FORMAT()); \
break;
+#define SIMD_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), \
+ i.InputSimd128Register(1).Format(f)); \
+ break; \
+ }
+#define SIMD_FCM_L_CASE(Op, ImmOp, RegOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ if (instr->InputCount() == 1) { \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ } else { \
+ __ Fcm##RegOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ } \
+ break; \
+ }
+#define SIMD_FCM_G_CASE(Op, ImmOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ /* Currently Gt/Ge instructions are only used with zero */ \
+ DCHECK_EQ(instr->InputCount(), 1); \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2066,7 +2108,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).V##FORMAT()); \
break; \
}
-
+#define SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ VRegister dst = i.OutputSimd128Register().Format(f); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f)); \
+ __ Instr(dst, i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(2).Format(f)); \
+ break; \
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FSqrt, Fsqrt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FAdd, Fadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FSub, Fsub);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMul, Fmul);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FDiv, Fdiv);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FNeg, Fneg);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64IAbs, Abs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64INeg, Neg);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64RoundingAverageU, Urhadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinS, Smin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxS, Smax);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinU, Umin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxU, Umax);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mla, Mla);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mls, Mls);
case kArm64Sxtl: {
VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
@@ -2129,51 +2197,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2S());
break;
}
- case kArm64F64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+ case kArm64FExtractLane: {
+ VectorFormat dst_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatFillQ(dst_f);
+ __ Mov(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), i.InputInt8(1));
break;
}
- case kArm64F64x2ExtractLane: {
- __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
- break;
- }
- case kArm64F64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64FReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
- break;
- }
- SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
- case kArm64F64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
- __ Mvn(dst, dst);
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
break;
}
- case kArm64F64x2Lt: {
- __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- }
- case kArm64F64x2Le: {
- __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ SIMD_FCM_L_CASE(kArm64FEq, eq, eq);
+ case kArm64FNe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ if (instr->InputCount() == 1) {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f), +0.0);
+ } else {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
+ }
+ __ Mvn(dst, dst);
break;
}
+ SIMD_FCM_L_CASE(kArm64FLt, lt, gt);
+ SIMD_FCM_L_CASE(kArm64FLe, le, ge);
+ SIMD_FCM_G_CASE(kArm64FGt, gt);
+ SIMD_FCM_G_CASE(kArm64FGe, ge);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F64x2Pmin: {
@@ -2197,63 +2255,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
- break;
- }
- case kArm64F32x4ExtractLane: {
- __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64F32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
- break;
- }
SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
- case kArm64F32x4MulElement: {
- __ Fmul(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).S(), i.InputInt8(2));
- break;
- }
- case kArm64F64x2MulElement: {
- __ Fmul(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).D(), i.InputInt8(2));
- break;
- }
- case kArm64F32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- case kArm64F32x4Lt: {
- __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- }
- case kArm64F32x4Le: {
- __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
+ case kArm64FMulElement: {
+ VectorFormat s_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat v_f = VectorFormatFillQ(s_f);
+ __ Fmul(i.OutputSimd128Register().Format(v_f),
+ i.InputSimd128Register(0).Format(v_f),
+ i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
@@ -2279,26 +2291,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64I64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
- break;
- }
- case kArm64I64x2ExtractLane: {
- __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
+ case kArm64IExtractLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register dst =
+ f == kFormat2D ? i.OutputRegister64() : i.OutputRegister32();
+ __ Mov(dst, i.InputSimd128Register(0).Format(f), i.InputInt8(1));
break;
}
- case kArm64I64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64IReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
+ Register src2 =
+ f == kFormat2D ? i.InputRegister64(2) : i.InputRegister32(2);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+ __ Mov(dst, i.InputInt8(1), src2);
break;
}
- SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
- SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
break;
@@ -2307,8 +2318,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 6, V2D, Sshl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
- SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
UseScratchRegisterScope scope(tasm());
VRegister dst = i.OutputSimd128Register();
@@ -2368,16 +2379,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IEq, Cmeq);
+ case kArm64INe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Cmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtS, Cmgt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeS, Cmge);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
@@ -2386,26 +2398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
- case kArm64I32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
- break;
- }
- case kArm64I32x4ExtractLane: {
- __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64I32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
break;
@@ -2414,33 +2407,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 5, V4S, Sshl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mls, Mls, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
- case kArm64I32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Abs, Abs, 4S);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2468,30 +2442,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
break;
}
- case kArm64I16x8Splat: {
- __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
- break;
- }
- case kArm64I16x8ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneU: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneS: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V8H(),
- src1 = i.InputSimd128Register(0).V8H();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
break;
@@ -2514,25 +2476,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatS, Sqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatS, Sqsub);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
- case kArm64I16x8Ne: {
- VRegister dst = i.OutputSimd128Register().V8H();
- __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
- i.InputSimd128Register(1).V8H());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2551,15 +2497,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
- SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatU, Uqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
- SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2576,30 +2516,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V8H(), 0);
break;
}
- case kArm64I8x16Splat: {
- __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
- break;
- }
- case kArm64I8x16ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V16B(),
- src1 = i.InputSimd128Register(0).V16B();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 3, V16B, Sshl, W);
break;
@@ -2622,24 +2538,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
- case kArm64I8x16Ne: {
- VRegister dst = i.OutputSimd128Register().V16B();
- __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
- i.InputSimd128Register(1).V16B());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 3, V16B, Ushl, W);
break;
@@ -2658,14 +2556,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
- SIMD_BINOP_CASE(kArm64I8x16RoundingAverageU, Urhadd, 16B);
- SIMD_UNOP_CASE(kArm64I8x16Abs, Abs, 16B);
case kArm64I8x16BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2716,12 +2606,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default:
UNREACHABLE();
- break;
}
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
SIMD_BINOP_CASE(kArm64S128AndNot, Bic, 16B);
+ case kArm64Ssra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Ssra(dst, i.InputSimd128Register(1).Format(f), i.InputInt8(2) & mask);
+ break;
+ }
+ case kArm64Usra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Usra(dst, i.InputSimd128Register(1).Format(f), i.InputUint8(2) & mask);
+ break;
+ }
case kArm64S32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V4S(),
src0 = i.InputSimd128Register(0).V4S(),
@@ -2892,8 +2799,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef SIMD_UNOP_CASE
+#undef SIMD_UNOP_LANE_SIZE_CASE
#undef SIMD_BINOP_CASE
+#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
+#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -2907,7 +2817,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2919,7 +2828,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2931,7 +2839,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2943,7 +2850,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2961,19 +2867,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ CmovX(kSpeculationPoisonRegister, xzr,
- FlagsConditionToCondition(condition));
- __ Csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3143,7 +3036,6 @@ void CodeGenerator::AssembleConstructFrame() {
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
- ResetSpeculationPoison();
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3343,7 +3235,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ }
if (parameter_slots > 1) {
__ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 3f2e6151b60..d8ee8099189 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,423 +11,344 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64F64x2Splat) \
- V(Arm64F64x2ExtractLane) \
- V(Arm64F64x2ReplaceLane) \
- V(Arm64F64x2Abs) \
- V(Arm64F64x2Neg) \
- V(Arm64F64x2Sqrt) \
- V(Arm64F64x2Add) \
- V(Arm64F64x2Sub) \
- V(Arm64F64x2Mul) \
- V(Arm64F64x2MulElement) \
- V(Arm64F64x2Div) \
- V(Arm64F64x2Min) \
- V(Arm64F64x2Max) \
- V(Arm64F64x2Eq) \
- V(Arm64F64x2Ne) \
- V(Arm64F64x2Lt) \
- V(Arm64F64x2Le) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4Splat) \
- V(Arm64F32x4ExtractLane) \
- V(Arm64F32x4ReplaceLane) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4Abs) \
- V(Arm64F32x4Neg) \
- V(Arm64F32x4Sqrt) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Add) \
- V(Arm64F32x4Sub) \
- V(Arm64F32x4Mul) \
- V(Arm64F32x4MulElement) \
- V(Arm64F32x4Div) \
- V(Arm64F32x4Min) \
- V(Arm64F32x4Max) \
- V(Arm64F32x4Eq) \
- V(Arm64F32x4Ne) \
- V(Arm64F32x4Lt) \
- V(Arm64F32x4Le) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64I64x2Splat) \
- V(Arm64I64x2ExtractLane) \
- V(Arm64I64x2ReplaceLane) \
- V(Arm64I64x2Abs) \
- V(Arm64I64x2Neg) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64I64x2Add) \
- V(Arm64I64x2Sub) \
- V(Arm64I64x2Mul) \
- V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4Splat) \
- V(Arm64I32x4ExtractLane) \
- V(Arm64I32x4ReplaceLane) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Neg) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Add) \
- V(Arm64I32x4Sub) \
- V(Arm64I32x4Mul) \
- V(Arm64I32x4Mla) \
- V(Arm64I32x4Mls) \
- V(Arm64I32x4MinS) \
- V(Arm64I32x4MaxS) \
- V(Arm64I32x4Eq) \
- V(Arm64I32x4Ne) \
- V(Arm64I32x4GtS) \
- V(Arm64I32x4GeS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64I32x4MinU) \
- V(Arm64I32x4MaxU) \
- V(Arm64I32x4GtU) \
- V(Arm64I32x4GeU) \
- V(Arm64I32x4Abs) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64I16x8Splat) \
- V(Arm64I16x8ExtractLaneU) \
- V(Arm64I16x8ExtractLaneS) \
- V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8Neg) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64I16x8Add) \
- V(Arm64I16x8AddSatS) \
- V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8Mla) \
- V(Arm64I16x8Mls) \
- V(Arm64I16x8MinS) \
- V(Arm64I16x8MaxS) \
- V(Arm64I16x8Eq) \
- V(Arm64I16x8Ne) \
- V(Arm64I16x8GtS) \
- V(Arm64I16x8GeS) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSatU) \
- V(Arm64I16x8SubSatU) \
- V(Arm64I16x8MinU) \
- V(Arm64I16x8MaxU) \
- V(Arm64I16x8GtU) \
- V(Arm64I16x8GeU) \
- V(Arm64I16x8RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8Abs) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Splat) \
- V(Arm64I8x16ExtractLaneU) \
- V(Arm64I8x16ExtractLaneS) \
- V(Arm64I8x16ReplaceLane) \
- V(Arm64I8x16Neg) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16Add) \
- V(Arm64I8x16AddSatS) \
- V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSatS) \
- V(Arm64I8x16Mla) \
- V(Arm64I8x16Mls) \
- V(Arm64I8x16MinS) \
- V(Arm64I8x16MaxS) \
- V(Arm64I8x16Eq) \
- V(Arm64I8x16Ne) \
- V(Arm64I8x16GtS) \
- V(Arm64I8x16GeS) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSatU) \
- V(Arm64I8x16SubSatU) \
- V(Arm64I8x16MinU) \
- V(Arm64I8x16MaxU) \
- V(Arm64I8x16GtU) \
- V(Arm64I8x16GeU) \
- V(Arm64I8x16RoundingAverageU) \
- V(Arm64I8x16Abs) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint8) \
- V(Arm64Word64AtomicLoadUint16) \
- V(Arm64Word64AtomicLoadUint32) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord8) \
- V(Arm64Word64AtomicStoreWord16) \
- V(Arm64Word64AtomicStoreWord32) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint8) \
- V(Arm64Word64AtomicAddUint16) \
- V(Arm64Word64AtomicAddUint32) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint8) \
- V(Arm64Word64AtomicSubUint16) \
- V(Arm64Word64AtomicSubUint32) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint8) \
- V(Arm64Word64AtomicAndUint16) \
- V(Arm64Word64AtomicAndUint32) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint8) \
- V(Arm64Word64AtomicOrUint16) \
- V(Arm64Word64AtomicOrUint32) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint8) \
- V(Arm64Word64AtomicXorUint16) \
- V(Arm64Word64AtomicXorUint32) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint8) \
- V(Arm64Word64AtomicExchangeUint16) \
- V(Arm64Word64AtomicExchangeUint32) \
- V(Arm64Word64AtomicExchangeUint64) \
- V(Arm64Word64AtomicCompareExchangeUint8) \
- V(Arm64Word64AtomicCompareExchangeUint16) \
- V(Arm64Word64AtomicCompareExchangeUint32) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Ldr) \
+ V(Arm64Ldrb) \
+ V(Arm64LdrD) \
+ V(Arm64Ldrh) \
+ V(Arm64LdrQ) \
+ V(Arm64LdrS) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64LoadLane) \
+ V(Arm64LoadSplat) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64StoreLane) \
+ V(Arm64Str) \
+ V(Arm64Strb) \
+ V(Arm64StrD) \
+ V(Arm64Strh) \
+ V(Arm64StrQ) \
+ V(Arm64StrS) \
+ V(Arm64StrW)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64FGt) \
+ V(Arm64FGe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index f4446cdbf86..4d123050ec2 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -26,6 +26,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmn:
case kArm64Cmn32:
case kArm64Cnt:
+ case kArm64Cnt32:
+ case kArm64Cnt64:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -42,10 +44,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
+ case kArm64Smlal:
+ case kArm64Smlal2:
case kArm64Smull:
case kArm64Smull2:
case kArm64Uadalp:
case kArm64Uaddlp:
+ case kArm64Umlal:
+ case kArm64Umlal2:
case kArm64Umull:
case kArm64Umull2:
case kArm64Madd:
@@ -147,23 +153,25 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
- case kArm64F64x2Splat:
- case kArm64F64x2ExtractLane:
- case kArm64F64x2ReplaceLane:
- case kArm64F64x2Abs:
- case kArm64F64x2Neg:
- case kArm64F64x2Sqrt:
- case kArm64F64x2Add:
- case kArm64F64x2Sub:
- case kArm64F64x2Mul:
- case kArm64F64x2MulElement:
- case kArm64F64x2Div:
- case kArm64F64x2Min:
- case kArm64F64x2Max:
- case kArm64F64x2Eq:
- case kArm64F64x2Ne:
- case kArm64F64x2Lt:
- case kArm64F64x2Le:
+ case kArm64FExtractLane:
+ case kArm64FReplaceLane:
+ case kArm64FSplat:
+ case kArm64FAbs:
+ case kArm64FSqrt:
+ case kArm64FNeg:
+ case kArm64FAdd:
+ case kArm64FSub:
+ case kArm64FMul:
+ case kArm64FMulElement:
+ case kArm64FDiv:
+ case kArm64FMin:
+ case kArm64FMax:
+ case kArm64FEq:
+ case kArm64FNe:
+ case kArm64FLt:
+ case kArm64FLe:
+ case kArm64FGt:
+ case kArm64FGe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
@@ -171,144 +179,73 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ConvertLowI32x4S:
case kArm64F64x2ConvertLowI32x4U:
case kArm64F64x2PromoteLowF32x4:
- case kArm64F32x4Splat:
- case kArm64F32x4ExtractLane:
- case kArm64F32x4ReplaceLane:
case kArm64F32x4SConvertI32x4:
case kArm64F32x4UConvertI32x4:
- case kArm64F32x4Abs:
- case kArm64F32x4Neg:
- case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
- case kArm64F32x4Add:
- case kArm64F32x4Sub:
- case kArm64F32x4Mul:
- case kArm64F32x4MulElement:
- case kArm64F32x4Div:
- case kArm64F32x4Min:
- case kArm64F32x4Max:
- case kArm64F32x4Eq:
- case kArm64F32x4Ne:
- case kArm64F32x4Lt:
- case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
case kArm64F32x4DemoteF64x2Zero:
- case kArm64I64x2Splat:
- case kArm64I64x2ExtractLane:
- case kArm64I64x2ReplaceLane:
- case kArm64I64x2Abs:
- case kArm64I64x2Neg:
+ case kArm64IExtractLane:
+ case kArm64IReplaceLane:
+ case kArm64ISplat:
+ case kArm64IAbs:
+ case kArm64INeg:
+ case kArm64Mla:
+ case kArm64Mls:
+ case kArm64RoundingAverageU:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
- case kArm64I64x2Add:
- case kArm64I64x2Sub:
+ case kArm64IAdd:
+ case kArm64ISub:
case kArm64I64x2Mul:
- case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
+ case kArm64IEq:
+ case kArm64INe:
+ case kArm64IGtS:
+ case kArm64IGeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
- case kArm64I32x4Splat:
- case kArm64I32x4ExtractLane:
- case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
case kArm64Sxtl:
case kArm64Sxtl2:
case kArm64Uxtl:
case kArm64Uxtl2:
- case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
- case kArm64I32x4Add:
- case kArm64I32x4Sub:
case kArm64I32x4Mul:
- case kArm64I32x4Mla:
- case kArm64I32x4Mls:
- case kArm64I32x4MinS:
- case kArm64I32x4MaxS:
- case kArm64I32x4Eq:
- case kArm64I32x4Ne:
- case kArm64I32x4GtS:
- case kArm64I32x4GeS:
+ case kArm64IMinS:
+ case kArm64IMaxS:
case kArm64I32x4UConvertF32x4:
case kArm64I32x4ShrU:
- case kArm64I32x4MinU:
- case kArm64I32x4MaxU:
- case kArm64I32x4GtU:
- case kArm64I32x4GeU:
- case kArm64I32x4Abs:
+ case kArm64IMinU:
+ case kArm64IMaxU:
+ case kArm64IGtU:
+ case kArm64IGeU:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
case kArm64I32x4TruncSatF64x2SZero:
case kArm64I32x4TruncSatF64x2UZero:
- case kArm64I16x8Splat:
- case kArm64I16x8ExtractLaneU:
- case kArm64I16x8ExtractLaneS:
- case kArm64I16x8ReplaceLane:
- case kArm64I16x8Neg:
+ case kArm64IExtractLaneU:
+ case kArm64IExtractLaneS:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
- case kArm64I16x8Add:
- case kArm64I16x8AddSatS:
- case kArm64I16x8Sub:
- case kArm64I16x8SubSatS:
+ case kArm64IAddSatS:
+ case kArm64ISubSatS:
case kArm64I16x8Mul:
- case kArm64I16x8Mla:
- case kArm64I16x8Mls:
- case kArm64I16x8MinS:
- case kArm64I16x8MaxS:
- case kArm64I16x8Eq:
- case kArm64I16x8Ne:
- case kArm64I16x8GtS:
- case kArm64I16x8GeS:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSatU:
- case kArm64I16x8SubSatU:
- case kArm64I16x8MinU:
- case kArm64I16x8MaxU:
- case kArm64I16x8GtU:
- case kArm64I16x8GeU:
- case kArm64I16x8RoundingAverageU:
+ case kArm64IAddSatU:
+ case kArm64ISubSatU:
case kArm64I16x8Q15MulRSatS:
- case kArm64I16x8Abs:
case kArm64I16x8BitMask:
- case kArm64I8x16Splat:
- case kArm64I8x16ExtractLaneU:
- case kArm64I8x16ExtractLaneS:
- case kArm64I8x16ReplaceLane:
- case kArm64I8x16Neg:
case kArm64I8x16Shl:
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
- case kArm64I8x16Add:
- case kArm64I8x16AddSatS:
- case kArm64I8x16Sub:
- case kArm64I8x16SubSatS:
- case kArm64I8x16Mla:
- case kArm64I8x16Mls:
- case kArm64I8x16MinS:
- case kArm64I8x16MaxS:
- case kArm64I8x16Eq:
- case kArm64I8x16Ne:
- case kArm64I8x16GtS:
- case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSatU:
- case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
- case kArm64I8x16MinU:
- case kArm64I8x16MaxU:
- case kArm64I8x16GtU:
- case kArm64I8x16GeU:
- case kArm64I8x16RoundingAverageU:
- case kArm64I8x16Abs:
case kArm64I8x16BitMask:
case kArm64S128Const:
case kArm64S128Zero:
@@ -319,6 +256,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Not:
case kArm64S128Select:
case kArm64S128AndNot:
+ case kArm64Ssra:
+ case kArm64Usra:
case kArm64S32x4ZipLeft:
case kArm64S32x4ZipRight:
case kArm64S32x4UnzipLeft:
@@ -373,6 +312,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
+ case kArm64LdarDecompressTaggedSigned:
+ case kArm64LdarDecompressTaggedPointer:
+ case kArm64LdarDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -395,48 +337,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64StlrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:
return kHasSideEffect;
- case kArm64Word64AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint32:
case kArm64Word64AtomicLoadUint64:
return kIsLoadOperation;
- case kArm64Word64AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord32:
case kArm64Word64AtomicStoreWord64:
- case kArm64Word64AtomicAddUint8:
- case kArm64Word64AtomicAddUint16:
- case kArm64Word64AtomicAddUint32:
case kArm64Word64AtomicAddUint64:
- case kArm64Word64AtomicSubUint8:
- case kArm64Word64AtomicSubUint16:
- case kArm64Word64AtomicSubUint32:
case kArm64Word64AtomicSubUint64:
- case kArm64Word64AtomicAndUint8:
- case kArm64Word64AtomicAndUint16:
- case kArm64Word64AtomicAndUint32:
case kArm64Word64AtomicAndUint64:
- case kArm64Word64AtomicOrUint8:
- case kArm64Word64AtomicOrUint16:
- case kArm64Word64AtomicOrUint32:
case kArm64Word64AtomicOrUint64:
- case kArm64Word64AtomicXorUint8:
- case kArm64Word64AtomicXorUint16:
- case kArm64Word64AtomicXorUint32:
case kArm64Word64AtomicXorUint64:
- case kArm64Word64AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint32:
case kArm64Word64AtomicExchangeUint64:
- case kArm64Word64AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint32:
case kArm64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 6a1a101e35b..5dec14b9982 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -190,7 +190,8 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
}
}
-void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRI(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -205,7 +206,8 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
-void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRIR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -577,9 +579,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -845,10 +847,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (node->opcode() == IrOpcode::kProtectedLoad) {
opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
@@ -856,8 +854,6 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -1441,6 +1437,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
#define RR_OP_LIST(V) \
V(Word64Clz, kArm64Clz) \
V(Word32Clz, kArm64Clz32) \
+ V(Word32Popcnt, kArm64Cnt32) \
+ V(Word64Popcnt, kArm64Cnt64) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
V(Word32ReverseBytes, kArm64Rev32) \
@@ -1531,10 +1529,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1938,7 +1932,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
@@ -2324,9 +2320,6 @@ template <int N>
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
- // Branch poisoning requires flags to be set, so when it's enabled for
- // a particular branch, we shouldn't be applying the cbz/tbz optimization.
- DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
@@ -2414,7 +2407,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ if (opcode == kArm64Cmp) {
Int64Matcher m(right);
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
@@ -2432,19 +2425,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (!cont->IsPoisoned()) {
- if (m.right().HasResolvedValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
- m.right().ResolvedValue(), node, cond, cont)) {
- return;
- }
- } else if (m.left().HasResolvedValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
- m.left().ResolvedValue(), node, commuted_cond,
- cont)) {
- return;
- }
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasResolvedValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
+ return;
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2533,8 +2524,7 @@ struct TestAndBranchMatcher {
Matcher matcher_;
void Initialize() {
- if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasResolvedValue() &&
+ if (cont_->IsBranch() && matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
@@ -2583,7 +2573,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2592,13 +2582,14 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2609,40 +2600,149 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(new_value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDAR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kArm64Word64AtomicLoadUint64;
+ break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ code = kArm64LdarDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ code = kArm64LdarDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ code = kArm64LdarDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ code = kArm64Word64AtomicLoadUint64;
+ } else {
+ code = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ code = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |=
+ AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+
+ // The memory order is ignored as both release and sequentially consistent
+ // stores can emit STLR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kArm64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kArm64StlrCompressTagged;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK_EQ(width, AtomicWidth::kWord32);
+ code = kArm64StlrCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+ }
+
+ code |= AddressingModeField::encode(kMode_MRR);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
temps);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2653,7 +2753,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
@@ -2842,7 +2943,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- if (!cont->IsPoisoned() && cont->IsBranch()) {
+ if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
@@ -3196,159 +3297,91 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -3369,15 +3402,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3402,14 +3434,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
- kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kArm64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3426,44 +3458,22 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-#define SIMD_TYPE_LIST(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
-
#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
- V(I64x2Abs, kArm64I64x2Abs) \
- V(I64x2Neg, kArm64I64x2Neg) \
V(I64x2BitMask, kArm64I64x2BitMask) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
V(I32x4BitMask, kArm64I32x4BitMask) \
V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
V(I8x16BitMask, kArm64I8x16BitMask) \
V(S128Not, kArm64S128Not) \
V(V128AnyTrue, kArm64V128AnyTrue) \
@@ -3472,6 +3482,28 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8AllTrue, kArm64I16x8AllTrue) \
V(I8x16AllTrue, kArm64I8x16AllTrue)
+#define SIMD_UNOP_LANE_SIZE_LIST(V) \
+ V(F64x2Splat, kArm64FSplat, 64) \
+ V(F64x2Abs, kArm64FAbs, 64) \
+ V(F64x2Sqrt, kArm64FSqrt, 64) \
+ V(F64x2Neg, kArm64FNeg, 64) \
+ V(F32x4Splat, kArm64FSplat, 32) \
+ V(F32x4Abs, kArm64FAbs, 32) \
+ V(F32x4Sqrt, kArm64FSqrt, 32) \
+ V(F32x4Neg, kArm64FNeg, 32) \
+ V(I64x2Splat, kArm64ISplat, 64) \
+ V(I64x2Abs, kArm64IAbs, 64) \
+ V(I64x2Neg, kArm64INeg, 64) \
+ V(I32x4Splat, kArm64ISplat, 32) \
+ V(I32x4Abs, kArm64IAbs, 32) \
+ V(I32x4Neg, kArm64INeg, 32) \
+ V(I16x8Splat, kArm64ISplat, 16) \
+ V(I16x8Abs, kArm64IAbs, 16) \
+ V(I16x8Neg, kArm64INeg, 16) \
+ V(I8x16Splat, kArm64ISplat, 8) \
+ V(I8x16Abs, kArm64IAbs, 8) \
+ V(I8x16Neg, kArm64INeg, 8)
+
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
V(I64x2ShrS, 64) \
@@ -3487,85 +3519,77 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, kArm64F64x2Add) \
- V(F64x2Sub, kArm64F64x2Sub) \
- V(F64x2Div, kArm64F64x2Div) \
- V(F64x2Min, kArm64F64x2Min) \
- V(F64x2Max, kArm64F64x2Max) \
- V(F64x2Eq, kArm64F64x2Eq) \
- V(F64x2Ne, kArm64F64x2Ne) \
- V(F64x2Lt, kArm64F64x2Lt) \
- V(F64x2Le, kArm64F64x2Le) \
- V(F32x4Add, kArm64F32x4Add) \
- V(F32x4Sub, kArm64F32x4Sub) \
- V(F32x4Div, kArm64F32x4Div) \
- V(F32x4Min, kArm64F32x4Min) \
- V(F32x4Max, kArm64F32x4Max) \
- V(F32x4Eq, kArm64F32x4Eq) \
- V(F32x4Ne, kArm64F32x4Ne) \
- V(F32x4Lt, kArm64F32x4Lt) \
- V(F32x4Le, kArm64F32x4Le) \
- V(I64x2Add, kArm64I64x2Add) \
- V(I64x2Sub, kArm64I64x2Sub) \
- V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4Mul, kArm64I32x4Mul) \
- V(I32x4MinS, kArm64I32x4MinS) \
- V(I32x4MaxS, kArm64I32x4MaxS) \
- V(I32x4Eq, kArm64I32x4Eq) \
- V(I32x4Ne, kArm64I32x4Ne) \
- V(I32x4GtS, kArm64I32x4GtS) \
- V(I32x4GeS, kArm64I32x4GeS) \
- V(I32x4MinU, kArm64I32x4MinU) \
- V(I32x4MaxU, kArm64I32x4MaxU) \
- V(I32x4GtU, kArm64I32x4GtU) \
- V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSatS, kArm64I16x8AddSatS) \
- V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
- V(I16x8MinS, kArm64I16x8MinS) \
- V(I16x8MaxS, kArm64I16x8MaxS) \
- V(I16x8Eq, kArm64I16x8Eq) \
- V(I16x8Ne, kArm64I16x8Ne) \
- V(I16x8GtS, kArm64I16x8GtS) \
- V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSatU, kArm64I16x8AddSatU) \
- V(I16x8SubSatU, kArm64I16x8SubSatU) \
- V(I16x8MinU, kArm64I16x8MinU) \
- V(I16x8MaxU, kArm64I16x8MaxU) \
- V(I16x8GtU, kArm64I16x8GtU) \
- V(I16x8GeU, kArm64I16x8GeU) \
- V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
- V(I8x16Add, kArm64I8x16Add) \
- V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSatS, kArm64I8x16AddSatS) \
- V(I8x16SubSatS, kArm64I8x16SubSatS) \
- V(I8x16MinS, kArm64I8x16MinS) \
- V(I8x16MaxS, kArm64I8x16MaxS) \
- V(I8x16Eq, kArm64I8x16Eq) \
- V(I8x16Ne, kArm64I8x16Ne) \
- V(I8x16GtS, kArm64I8x16GtS) \
- V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSatU, kArm64I8x16AddSatU) \
- V(I8x16SubSatU, kArm64I8x16SubSatU) \
- V(I8x16MinU, kArm64I8x16MinU) \
- V(I8x16MaxU, kArm64I8x16MaxU) \
- V(I8x16GtU, kArm64I8x16GtU) \
- V(I8x16GeU, kArm64I8x16GeU) \
- V(I8x16RoundingAverageU, kArm64I8x16RoundingAverageU) \
V(S128And, kArm64S128And) \
V(S128Or, kArm64S128Or) \
V(S128Xor, kArm64S128Xor) \
V(S128AndNot, kArm64S128AndNot)
+#define SIMD_BINOP_LANE_SIZE_LIST(V) \
+ V(F64x2Min, kArm64FMin, 64) \
+ V(F64x2Max, kArm64FMax, 64) \
+ V(F64x2Add, kArm64FAdd, 64) \
+ V(F64x2Sub, kArm64FSub, 64) \
+ V(F64x2Div, kArm64FDiv, 64) \
+ V(F32x4Min, kArm64FMin, 32) \
+ V(F32x4Max, kArm64FMax, 32) \
+ V(F32x4Add, kArm64FAdd, 32) \
+ V(F32x4Sub, kArm64FSub, 32) \
+ V(F32x4Div, kArm64FDiv, 32) \
+ V(I64x2Sub, kArm64ISub, 64) \
+ V(I64x2Eq, kArm64IEq, 64) \
+ V(I64x2Ne, kArm64INe, 64) \
+ V(I64x2GtS, kArm64IGtS, 64) \
+ V(I64x2GeS, kArm64IGeS, 64) \
+ V(I32x4Eq, kArm64IEq, 32) \
+ V(I32x4Ne, kArm64INe, 32) \
+ V(I32x4GtS, kArm64IGtS, 32) \
+ V(I32x4GeS, kArm64IGeS, 32) \
+ V(I32x4GtU, kArm64IGtU, 32) \
+ V(I32x4GeU, kArm64IGeU, 32) \
+ V(I32x4MinS, kArm64IMinS, 32) \
+ V(I32x4MaxS, kArm64IMaxS, 32) \
+ V(I32x4MinU, kArm64IMinU, 32) \
+ V(I32x4MaxU, kArm64IMaxU, 32) \
+ V(I16x8AddSatS, kArm64IAddSatS, 16) \
+ V(I16x8SubSatS, kArm64ISubSatS, 16) \
+ V(I16x8AddSatU, kArm64IAddSatU, 16) \
+ V(I16x8SubSatU, kArm64ISubSatU, 16) \
+ V(I16x8Eq, kArm64IEq, 16) \
+ V(I16x8Ne, kArm64INe, 16) \
+ V(I16x8GtS, kArm64IGtS, 16) \
+ V(I16x8GeS, kArm64IGeS, 16) \
+ V(I16x8GtU, kArm64IGtU, 16) \
+ V(I16x8GeU, kArm64IGeU, 16) \
+ V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
+ V(I8x16RoundingAverageU, kArm64RoundingAverageU, 8) \
+ V(I16x8MinS, kArm64IMinS, 16) \
+ V(I16x8MaxS, kArm64IMaxS, 16) \
+ V(I16x8MinU, kArm64IMinU, 16) \
+ V(I16x8MaxU, kArm64IMaxU, 16) \
+ V(I8x16Sub, kArm64ISub, 8) \
+ V(I8x16AddSatS, kArm64IAddSatS, 8) \
+ V(I8x16SubSatS, kArm64ISubSatS, 8) \
+ V(I8x16AddSatU, kArm64IAddSatU, 8) \
+ V(I8x16SubSatU, kArm64ISubSatU, 8) \
+ V(I8x16Eq, kArm64IEq, 8) \
+ V(I8x16Ne, kArm64INe, 8) \
+ V(I8x16GtS, kArm64IGtS, 8) \
+ V(I8x16GeS, kArm64IGeS, 8) \
+ V(I8x16GtU, kArm64IGtU, 8) \
+ V(I8x16GeU, kArm64IGeU, 8) \
+ V(I8x16MinS, kArm64IMinS, 8) \
+ V(I8x16MaxS, kArm64IMaxS, 8) \
+ V(I8x16MinU, kArm64IMinU, 8) \
+ V(I8x16MaxU, kArm64IMaxU, 8)
+
void InstructionSelector::VisitS128Const(Node* node) {
Arm64OperandGenerator g(this);
static const int kUint32Immediates = 4;
@@ -3589,34 +3613,34 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kArm64S128Zero, g.DefineAsRegister(node));
}
-#define SIMD_VISIT_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRR(this, kArm64##Type##Splat, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
-#undef SIMD_VISIT_SPLAT
-
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- VisitRRI(this, kArm64##Type##ExtractLane##Sign, node); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, \
+ kArm64##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64)
+SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32)
+SIMD_VISIT_EXTRACT_LANE(I64x2, I, , 64)
+SIMD_VISIT_EXTRACT_LANE(I32x4, I, , 32)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, U, 16)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, S, 16)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, U, 8)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_REPLACE_LANE(F64x2, F, 64)
+SIMD_VISIT_REPLACE_LANE(F32x4, F, 32)
+SIMD_VISIT_REPLACE_LANE(I64x2, I, 64)
+SIMD_VISIT_REPLACE_LANE(I32x4, I, 32)
+SIMD_VISIT_REPLACE_LANE(I16x8, I, 16)
+SIMD_VISIT_REPLACE_LANE(I8x16, I, 8)
#undef SIMD_VISIT_REPLACE_LANE
-#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -3642,6 +3666,22 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+#define SIMD_VISIT_BINOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_BINOP_LANE_SIZE_LIST(SIMD_VISIT_BINOP_LANE_SIZE)
+#undef SIMD_VISIT_BINOP_LANE_SIZE
+#undef SIMD_BINOP_LANE_SIZE_LIST
+
+#define SIMD_VISIT_UNOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
+#undef SIMD_VISIT_UNOP_LANE_SIZE
+#undef SIMD_UNOP_LANE_SIZE_LIST
+
using ShuffleMatcher =
ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
@@ -3702,22 +3742,22 @@ MulWithDupResult TryMatchMulWithDup(Node* node) {
void InstructionSelector::VisitF32x4Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<4>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F32x4MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(32),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F32x4Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(32), node);
}
}
void InstructionSelector::VisitF64x2Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<2>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F64x2MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(64),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F64x2Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(64), node);
}
}
@@ -3729,86 +3769,218 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
arraysize(temps), temps);
}
-#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
- void InstructionSelector::Visit##Type##Add(Node* node) { \
- Arm64OperandGenerator g(this); \
- Node* left = node->InputAt(0); \
- Node* right = node->InputAt(1); \
- /* Select Mla(z, x, y) for Add(Mul(x, y), z). */ \
- if (left->opcode() == IrOpcode::k##Type##Mul && CanCover(node, left)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0)), g.UseRegister(left->InputAt(1))); \
- return; \
- } \
- /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */ \
- if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0)), \
- g.UseRegister(right->InputAt(1))); \
- return; \
- } \
- /* Select Sadalp(x, y) for Add(x, ExtAddPairwiseS(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, right)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Sadalp(y, x) for Add(ExtAddPairwiseS(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, left)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(x, y) for Add(x, ExtAddPairwiseU(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, right)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(y, x) for Add(ExtAddPairwiseU(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, left)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- VisitRRR(this, kArm64##Type##Add, node); \
+namespace {
+
+// Used for pattern matching SIMD Add operations where one of the inputs
+// matches |opcode|, and ensures that the matched input is on the LHS (input 0).
+struct SimdAddOpMatcher : public NodeMatcher {
+ explicit SimdAddOpMatcher(Node* node, IrOpcode::Value opcode)
+ : NodeMatcher(node),
+ opcode_(opcode),
+ left_(InputAt(0)),
+ right_(InputAt(1)) {
+ DCHECK(HasProperty(Operator::kCommutative));
+ PutOpOnLeft();
+ }
+
+ bool Matches() { return left_->opcode() == opcode_; }
+ Node* left() const { return left_; }
+ Node* right() const { return right_; }
+
+ private:
+ void PutOpOnLeft() {
+ if (right_->opcode() == opcode_) {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left_);
+ node()->ReplaceInput(1, right_);
+ }
+ }
+ IrOpcode::Value opcode_;
+ Node* left_;
+ Node* right_;
+};
+
+bool ShraHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode shra_code, InstructionCode add_code,
+ IrOpcode::Value shift_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, shift_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ if (!g.IsIntegerConstant(m.left()->InputAt(1))) return false;
+
+ // If shifting by zero, just do the addition
+ if (g.GetIntegerConstantValue(m.left()->InputAt(1)) % lane_size == 0) {
+ selector->Emit(add_code, g.DefineAsRegister(node),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.right()));
+ } else {
+ selector->Emit(shra_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseImmediate(m.left()->InputAt(1)));
+ }
+ return true;
+}
+
+bool AdalpHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode adalp_code, IrOpcode::Value ext_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(adalp_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)));
+ return true;
+}
+
+bool MlaHelper(InstructionSelector* selector, Node* node,
+ InstructionCode mla_code, IrOpcode::Value mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(mla_code, g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+bool SmlalHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode smlal_code, IrOpcode::Value ext_mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+
+ selector->Emit(smlal_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI64x2Add(Node* node) {
+ if (!ShraHelper(this, node, 64, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrS) &&
+ !ShraHelper(this, node, 64, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(64), node);
+ }
+}
+
+void InstructionSelector::VisitI8x16Add(Node* node) {
+ if (!ShraHelper(this, node, 8, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrS) &&
+ !ShraHelper(this, node, 8, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(8), node);
+ }
+}
+
+#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
+ void InstructionSelector::Visit##Type##Add(Node* node) { \
+ /* Select Mla(z, x, y) for Add(x, Mul(y, z)). */ \
+ if (MlaHelper(this, node, kArm64Mla | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##Mul)) { \
+ return; \
+ } \
+ /* Select S/Uadalp(x, y) for Add(x, ExtAddPairwise(y)). */ \
+ if (AdalpHelper(this, node, LaneSize, kArm64Sadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S) || \
+ AdalpHelper(this, node, LaneSize, kArm64Uadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U)) { \
+ return; \
+ } \
+ /* Select S/Usra(x, y) for Add(x, ShiftRight(y, imm)). */ \
+ if (ShraHelper(this, node, LaneSize, kArm64Ssra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrS) || \
+ ShraHelper(this, node, LaneSize, kArm64Usra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrU)) { \
+ return; \
+ } \
+ /* Select Smlal/Umlal(x, y, z) for Add(x, ExtMulLow(y, z)) and \
+ * Smlal2/Umlal2(x, y, z) for Add(x, ExtMulHigh(y, z)). */ \
+ if (SmlalHelper(this, node, LaneSize, kArm64Smlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Smlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##U) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##U)) { \
+ return; \
+ } \
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(LaneSize), node); \
}
VISIT_SIMD_ADD(I32x4, I16x8, 32)
VISIT_SIMD_ADD(I16x8, I8x16, 16)
#undef VISIT_SIMD_ADD
-#define VISIT_SIMD_SUB(Type) \
+#define VISIT_SIMD_SUB(Type, LaneSize) \
void InstructionSelector::Visit##Type##Sub(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
/* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */ \
if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mls, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ Emit(kArm64Mls | LaneSizeField::encode(LaneSize), \
+ g.DefineSameAsFirst(node), g.UseRegister(left), \
g.UseRegister(right->InputAt(0)), \
g.UseRegister(right->InputAt(1))); \
return; \
} \
- VisitRRR(this, kArm64##Type##Sub, node); \
+ VisitRRR(this, kArm64ISub | LaneSizeField::encode(LaneSize), node); \
}
-VISIT_SIMD_SUB(I32x4)
-VISIT_SIMD_SUB(I16x8)
+VISIT_SIMD_SUB(I32x4, 32)
+VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
+namespace {
+bool isSimdZero(Arm64OperandGenerator& g, Node* node) {
+ auto m = V128ConstMatcher(node);
+ if (m.HasResolvedValue()) {
+ auto imms = m.ResolvedValue().immediate();
+ return (std::all_of(imms.begin(), imms.end(), std::logical_not<uint8_t>()));
+ }
+ return node->opcode() == IrOpcode::kS128Zero;
+}
+} // namespace
+
+#define VISIT_SIMD_FCM(Type, CmOp, CmOpposite, LaneSize) \
+ void InstructionSelector::Visit##Type##CmOp(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Node* left = node->InputAt(0); \
+ Node* right = node->InputAt(1); \
+ if (isSimdZero(g, left)) { \
+ Emit(kArm64F##CmOpposite | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(right)); \
+ return; \
+ } else if (isSimdZero(g, right)) { \
+ Emit(kArm64F##CmOp | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(left)); \
+ return; \
+ } \
+ VisitRRR(this, kArm64F##CmOp | LaneSizeField::encode(LaneSize), node); \
+ }
+
+VISIT_SIMD_FCM(F64x2, Eq, Eq, 64)
+VISIT_SIMD_FCM(F64x2, Ne, Ne, 64)
+VISIT_SIMD_FCM(F64x2, Lt, Gt, 64)
+VISIT_SIMD_FCM(F64x2, Le, Ge, 64)
+VISIT_SIMD_FCM(F32x4, Eq, Eq, 32)
+VISIT_SIMD_FCM(F32x4, Ne, Ne, 32)
+VISIT_SIMD_FCM(F32x4, Lt, Gt, 32)
+VISIT_SIMD_FCM(F32x4, Le, Ge, 32)
+#undef VISIT_SIMD_FCM
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
@@ -4110,6 +4282,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
diff --git a/chromium/v8/src/compiler/backend/code-generator.cc b/chromium/v8/src/compiler/backend/code-generator.cc
index 9e378b84584..ad5e18d0025 100644
--- a/chromium/v8/src/compiler/backend/code-generator.cc
+++ b/chromium/v8/src/compiler/backend/code-generator.cc
@@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name)
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator(
codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
protected_instructions_(codegen_zone),
result_(kSuccess),
- poisoning_level_(poisoning_level),
block_starts_(codegen_zone),
instr_starts_(codegen_zone),
debug_name_(debug_name) {
@@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
- offsets_info_.init_poison = tasm()->pc_offset();
- InitializeSpeculationPoison();
-
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
@@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() {
tasm()->bind(GetLabel(current_block_));
- TryInsertBranchPoisoning(block);
-
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
-void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
- // See if our predecessor was a basic block terminated by a branch_and_poison
- // instruction. If yes, then perform the masking based on the flags.
- if (block->PredecessorCount() != 1) return;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return;
- Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- switch (mode) {
- case kFlags_branch_and_poison: {
- BranchInfo branch;
- RpoNumber target = ComputeBranchInfo(&branch, instr);
- if (!target.IsValid()) {
- // Non-trivial branch, add the masking code.
- FlagsCondition condition = branch.condition;
- if (branch.false_label == GetLabel(block->rpo_number())) {
- condition = NegateFlagsCondition(condition);
- }
- AssembleBranchPoisoning(condition, instr);
- }
- break;
- }
- case kFlags_deoptimize_and_poison: {
- UNREACHABLE();
- }
- default:
- break;
- }
-}
-
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
@@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch:
- case kFlags_branch_and_poison: {
+ case kFlags_branch: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (target.IsValid()) {
@@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison: {
+ case kFlags_deoptimize: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset =
@@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
- Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = &continue_label;
+ branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(&continue_label);
- if (mode == kFlags_deoptimize_and_poison) {
- AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
- }
tasm()->bind(exit->continue_label());
break;
}
@@ -890,21 +848,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
case kFlags_trap: {
#if V8_ENABLE_WEBASSEMBLY
AssembleArchTrap(instr, condition);
+ break;
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
- break;
}
case kFlags_none: {
break;
}
}
- // TODO(jarin) We should thread the flag through rather than set it.
- if (instr->IsCall()) {
- ResetSpeculationPoison();
- }
-
return kSuccess;
}
@@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 2 - after
- // the code address and the poison-alias index.
- size_t frame_state_offset = 2;
+ // If the frame state is present, it starts at argument 1 - after
+ // the code address.
+ size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
@@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
OutputFrameStateCombine::Ignore());
}
-void CodeGenerator::InitializeSpeculationPoison() {
- if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
-
- // Initialize {kSpeculationPoisonRegister} either by comparing the expected
- // with the actual call target, or by unconditionally using {-1} initially.
- // Masking register arguments with it only makes sense in the first case.
- if (info()->called_with_code_start_register()) {
- tasm()->RecordComment("-- Prologue: generate speculation poison --");
- GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->poison_register_arguments()) {
- AssembleRegisterArgumentPoisoning();
- }
- } else {
- ResetSpeculationPoison();
- }
-}
-
-void CodeGenerator::ResetSpeculationPoison() {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- tasm()->ResetSpeculationPoisonRegister();
- }
-}
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/chromium/v8/src/compiler/backend/code-generator.h b/chromium/v8/src/compiler/backend/code-generator.h
index 7ccb09d5ac3..18de20f92c8 100644
--- a/chromium/v8/src/compiler/backend/code-generator.h
+++ b/chromium/v8/src/compiler/backend/code-generator.h
@@ -103,7 +103,6 @@ class DeoptimizationLiteral {
struct TurbolizerCodeOffsetsInfo {
int code_start_register_check = -1;
int deopt_check = -1;
- int init_poison = -1;
int blocks_start = -1;
int out_of_line_code = -1;
int deoptimization_exits = -1;
@@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo {
// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name = nullptr);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name = nullptr);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
- // Inserts mask update at the beginning of an instruction block if the
- // predecessor blocks ends with a masking branch.
- void TryInsertBranchPoisoning(const InstructionBlock* block);
-
- // Initializes the masking register in the prologue of a function.
- void InitializeSpeculationPoison();
- // Reset the masking register during execution of a function.
- void ResetSpeculationPoison();
- // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
- void GenerateSpeculationPoisonFromCodeStartRegister();
-
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(int instruction_index,
const InstructionBlock* block);
@@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// contains the expected pointer to the start of the instruction stream.
void AssembleCodeStartRegisterCheck();
- void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
-
// When entering a code that is marked for deoptimization, rather continuing
// with its execution, we jump to a lazy compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring it.
void BailoutIfDeoptimized();
- // Generates code to poison the stack pointer and implicit register arguments
- // like the context register and the function register.
- void AssembleRegisterArgumentPoisoning();
-
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
- PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
TurbolizerCodeOffsetsInfo offsets_info_;
ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
diff --git a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5db3f20fa4f..da6a9a81e32 100644
--- a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -5,6 +5,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/macro-assembler.h"
@@ -342,8 +343,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallRecordWriteStubSaveRegisters(object_, scratch1_,
remembered_set_action, save_fp_mode,
StubCallMode::kCallWasmRuntimeStub);
- } else {
#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
__ CallRecordWriteStubSaveRegisters(object_, scratch1_,
remembered_set_action, save_fp_mode);
}
@@ -684,16 +685,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ bind(&skip);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -712,11 +703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -738,19 +725,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ call(wasm_code, constant.rmode());
- }
+ __ call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -762,12 +740,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt32());
__ jmp(wasm_code, constant.rmode());
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,11 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -800,11 +769,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -826,8 +791,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ i.TempRegister(0));
break;
}
case kArchSaveCallerRegisters: {
@@ -922,13 +889,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == edx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -993,7 +960,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1005,7 +973,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ mov(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ mov(operand, value);
+ } else {
+ __ mov(scratch0, value);
+ __ xchg(scratch0, operand);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1278,134 +1251,85 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Bswap:
__ bswap(i.OutputRegister());
break;
- case kArchWordPoisonOnSpeculation:
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
case kIA32MFence:
__ mfence();
break;
case kIA32LFence:
__ lfence();
break;
- case kSSEFloat32Cmp:
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Add:
- __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sub:
- __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Mul:
- __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Div:
- __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
- case kSSEFloat32Sqrt:
- __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32Cmp:
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- __ xorps(i.OutputDoubleRegister(), tmp);
+ case kIA32Float32Sqrt:
+ __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- }
- case kSSEFloat32Round: {
+ case kIA32Float32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat64Cmp:
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kIA32Float64Cmp:
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Max: {
+ case kIA32Float32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Max: {
+ case kIA32Float64Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat32Min: {
+ case kIA32Float32Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
@@ -1413,29 +1337,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(kScratchDoubleReg, i.InputOperand(1));
- __ movmskps(i.TempRegister(0), kScratchDoubleReg);
+ __ Movss(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskps(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Min: {
+ case kIA32Float64Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
@@ -1443,32 +1367,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(kScratchDoubleReg, i.InputOperand(1));
- __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskpd(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Mod: {
+ case kIA32Float64Mod: {
Register tmp = i.TempRegister(1);
__ mov(tmp, esp);
__ AllocateStackSpace(kDoubleSize);
__ and_(esp, -8); // align to 8 byte boundary.
// Move values to st(0) and st(1).
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(esp, 0));
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(esp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -1484,186 +1408,147 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(esp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(esp, 0));
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat64Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat64Sqrt:
- __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64Sqrt:
+ __ Sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32Float64Round: {
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat32ToFloat64:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32ToFloat64:
+ __ Cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToFloat32:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64ToFloat32:
+ __ Cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToInt32:
- __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float32ToInt32:
+ __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
__ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
- case kSSEFloat64ToInt32:
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float64ToInt32:
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
__ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
case kSSEInt32ToFloat32:
+ // Calling Cvtsi2ss (which does a xor) regresses some benchmarks.
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
__ Cvtui2ss(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
case kSSEInt32ToFloat64:
+ // Calling Cvtsi2sd (which does a xor) regresses some benchmarks.
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat64:
+ case kIA32Uint32ToFloat64:
__ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
- case kSSEFloat64ExtractLowWord32:
+ case kIA32Float64ExtractLowWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
- case kSSEFloat64ExtractHighWord32:
+ case kIA32Float64ExtractHighWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
- case kSSEFloat64InsertLowWord32:
+ case kIA32Float64InsertLowWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
break;
- case kSSEFloat64InsertHighWord32:
+ case kIA32Float64InsertHighWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
break;
- case kSSEFloat64LoadLowWord32:
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64LoadLowWord32:
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Add: {
+ __ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Sub: {
+ __ Subss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Mul: {
+ __ Mulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Div: {
+ __ Divss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Add: {
+ __ Addsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Sub: {
+ __ Subsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Mul: {
+ __ Mulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Div: {
+ __ Divsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ case kFloat32Abs: {
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ case kFloat32Neg: {
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ case kFloat64Abs: {
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ case kFloat64Neg: {
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kSSEFloat64SilenceNaN:
- __ xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ case kIA32Float64SilenceNaN:
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
@@ -1955,7 +1840,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kIA32F32x4DemoteF64x2Zero: {
@@ -2102,28 +1991,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2Mul: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = i.TempSimd128Register(1);
-
- __ Movaps(tmp1, left);
- __ Movaps(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, tmp2, left);
-
- __ Paddq(tmp2, tmp2, tmp1);
- __ Psllq(tmp2, tmp2, byte{32});
-
- __ Pmuludq(dst, left, right);
- __ Paddq(dst, dst, tmp2);
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ i.TempSimd128Register(1));
break;
}
case kIA32I64x2ShrU: {
@@ -2242,34 +2112,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kIA32F32x4Abs: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, byte{1});
- __ Andps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Psrld(dst, dst, byte{1});
- __ Andps(dst, src);
- }
- break;
- }
- case kIA32F32x4Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- __ Xorps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pslld(dst, dst, byte{31});
- __ Xorps(dst, src);
- }
- break;
- }
case kIA32F32x4Sqrt: {
__ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2302,120 +2144,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEF32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the resuls, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ minps(kScratchDoubleReg, dst);
- __ minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ orps(kScratchDoubleReg, dst);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Min: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of minps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vminps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vminps(dst, src0, src1);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vcmpneqps(kScratchDoubleReg, dst, dst);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 10);
- __ vandnps(dst, kScratchDoubleReg, dst);
- break;
- }
- case kSSEF32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the resuls, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ maxps(kScratchDoubleReg, dst);
- __ maxps(dst, src1);
- // Find discrepancies.
- __ xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Max: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of maxps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vmaxps(dst, src0, src1);
- __ vxorps(dst, dst, kScratchDoubleReg);
- __ vorps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vsubps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vcmpneqps(dst, kScratchDoubleReg, kScratchDoubleReg);
- __ vpsrld(dst, dst, 10);
- __ vandnps(dst, dst, kScratchDoubleReg);
- break;
- }
- case kSSEF32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Min: {
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kAVXF32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32F32x4Max: {
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kSSEF32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Eq: {
+ __ Cmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Lt: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32F32x4Ne: {
+ __ Cmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEF32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Lt: {
+ __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Le: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32F32x4Le: {
+ __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32F32x4Pmin: {
@@ -2445,20 +2201,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertF32x4: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // NAN->0
- __ Cmpeqps(kScratchDoubleReg, src, src);
- __ Pand(dst, src, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
case kIA32I32x4SConvertI16x8Low: {
@@ -2490,117 +2235,63 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
- case kSSEI32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Add: {
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4Sub: {
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Mul: {
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4MinS: {
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MaxS: {
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Eq: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Ne: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
break;
}
- case kSSEI32x4MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4GtS: {
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
- break;
- }
- case kAVXI32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg);
- break;
- }
- case kSSEI32x4GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32I32x4GeS: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
- break;
- }
- case kAVXI32x4GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = i.InputOperand(1);
- __ vpminsd(kScratchDoubleReg, src1, src2);
- __ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ XMMRegister src2 = i.InputSimd128Register(1);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsd(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqd(dst, kScratchDoubleReg, src2);
+ } else {
+ DCHECK_EQ(dst, src1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminsd(dst, src2);
+ __ pcmpeqd(dst, src2);
+ }
break;
}
case kSSEI32x4UConvertF32x4: {
@@ -2671,28 +2362,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
- case kSSEI32x4MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MinU: {
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4MaxU: {
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI32x4GtU: {
@@ -2748,10 +2425,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));
+ } else {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputOperand(0));
+ }
break;
}
case kIA32I16x8ExtractLaneS: {
@@ -2789,105 +2467,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
- case kSSEI16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8SConvertI32x4: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpackssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8SConvertI32x4: {
+ __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Add: {
+ __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8AddSatS: {
+ __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Sub: {
+ __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatS: {
+ __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Mul: {
+ __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MinS: {
+ __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MaxS: {
+ __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8Eq: {
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI16x8Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2904,15 +2528,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI16x8GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8GtS: {
+ __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GeS: {
@@ -2944,63 +2562,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
- case kSSEI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputSimd128Register(1));
- break;
- }
- case kSSEI16x8AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8UConvertI32x4: {
+ __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kSSEI16x8MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8AddSatU: {
+ __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatU: {
+ __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MinU: {
+ __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MaxU: {
+ __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GtU: {
@@ -3060,10 +2644,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I8x16Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0),
+ kScratchDoubleReg);
+ } else {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputOperand(0),
+ kScratchDoubleReg);
+ }
break;
}
case kIA32I8x16ExtractLaneS: {
@@ -3137,15 +2724,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
- case kSSEI8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packsswb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SConvertI16x8: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpacksswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SConvertI16x8: {
+ __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16Neg: {
@@ -3162,64 +2743,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
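+      // Without AVX the instruction selector constrains dst == src (destructive SSE encoding).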
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Take shift value modulo 8.
- __ mov(tmp, i.InputRegister(1));
- __ and_(tmp, 7);
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): sub here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ sub(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psllw(dst, dst, tmp_simd);
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ __ I8x16Shl(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ tmp_simd);
}
break;
}
case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), i.TempRegister(0),
+ kScratchDoubleReg, i.TempSimd128Register(1));
}
break;
}
@@ -3296,18 +2842,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kSSEI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- __ packuswb(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackuswb(dst, dst, i.InputOperand(1));
+ case kIA32I8x16UConvertI16x8: {
+ __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32I8x16AddSatU: {
@@ -3322,34 +2859,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Unpack the bytes into words, do logical shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ i.TempSimd128Register(1));
}
+
break;
}
case kIA32I8x16MinU: {
@@ -3444,37 +2964,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSES128And: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ andps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128And: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSES128Or: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ orps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128Or: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32S128And: {
+ __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSES128Xor: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ xorps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32S128Or: {
+ __ Por(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXS128Xor: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32S128Xor: {
+ __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32S128Select: {
@@ -3541,20 +3043,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128Load8Splat: {
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load16Splat: {
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load32Splat: {
- __ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kIA32S128Load64Splat: {
@@ -3640,10 +3139,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3671,10 +3170,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3937,17 +3436,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairLoad: {
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ movq(tmp, i.MemoryOperand());
- __ Pextrd(i.OutputRegister(0), tmp, 0);
- __ Pextrd(i.OutputRegister(1), tmp, 1);
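+      // Load the pair with a single 64-bit movq into the scratch XMM register,
+      // then split it into the two 32-bit outputs.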
+ __ movq(kScratchDoubleReg, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
+ __ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
+ break;
+ }
+ case kIA32Word32ReleasePairStore: {
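+      // Save ebx, push the {lo, hi} input pair, load it as one 64-bit value into
+      // the scratch XMM register, restore ebx, then store the pair with a single movq.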
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(3);
+ __ movq(kScratchDoubleReg, MemOperand(esp, 0));
+ __ pop(ebx);
+ __ pop(ebx);
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-3);
+ __ movq(i.MemoryOperand(2), kScratchDoubleReg);
break;
}
- case kIA32Word32AtomicPairStore: {
+ case kIA32Word32SeqCstPairStore: {
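+      // Retry loop: reload the current pair into eax:edx and attempt the store
+      // until the compare-exchange succeeds.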
Label store;
__ bind(&store);
- __ mov(i.TempRegister(0), i.MemoryOperand(2));
- __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
@@ -3958,27 +3471,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &store);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
@@ -3998,31 +3511,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &exchange);
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
@@ -4038,27 +3551,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
+ case kAtomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint8: { \
+ case kAtomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Int16: { \
+ case kAtomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint16: { \
+ case kAtomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Word32: { \
+ case kAtomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
@@ -4107,16 +3620,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &binop);
break;
}
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
- break;
}
return kSuccess;
}
@@ -4126,41 +3638,29 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnorderedEqual:
case kEqual:
return equal;
- break;
case kUnorderedNotEqual:
case kNotEqual:
return not_equal;
- break;
case kSignedLessThan:
return less;
- break;
case kSignedGreaterThanOrEqual:
return greater_equal;
- break;
case kSignedLessThanOrEqual:
return less_equal;
- break;
case kSignedGreaterThan:
return greater;
- break;
case kUnsignedLessThan:
return below;
- break;
case kUnsignedGreaterThanOrEqual:
return above_equal;
- break;
case kUnsignedLessThanOrEqual:
return below_equal;
- break;
case kUnsignedGreaterThan:
return above;
- break;
case kOverflow:
return overflow;
- break;
case kNotOverflow:
return no_overflow;
- break;
default:
UNREACHABLE();
}
@@ -4183,12 +3683,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4648,18 +4142,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 42af3326f3a..ca150547634 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -11,404 +11,359 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Rol) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(IA32Bswap) \
- V(IA32MFence) \
- V(IA32LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32Movlps) \
- V(IA32Movhps) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
- V(IA32F64x2Sqrt) \
- V(IA32F64x2Add) \
- V(IA32F64x2Sub) \
- V(IA32F64x2Mul) \
- V(IA32F64x2Div) \
- V(IA32F64x2Min) \
- V(IA32F64x2Max) \
- V(IA32F64x2Eq) \
- V(IA32F64x2Ne) \
- V(IA32F64x2Lt) \
- V(IA32F64x2Le) \
- V(IA32F64x2Pmin) \
- V(IA32F64x2Pmax) \
- V(IA32F64x2Round) \
- V(IA32F64x2ConvertLowI32x4S) \
- V(IA32F64x2ConvertLowI32x4U) \
- V(IA32F64x2PromoteLowF32x4) \
- V(IA32I64x2SplatI32Pair) \
- V(IA32I64x2ReplaceLaneI32Pair) \
- V(IA32I64x2Abs) \
- V(IA32I64x2Neg) \
- V(IA32I64x2Shl) \
- V(IA32I64x2ShrS) \
- V(IA32I64x2Add) \
- V(IA32I64x2Sub) \
- V(IA32I64x2Mul) \
- V(IA32I64x2ShrU) \
- V(IA32I64x2BitMask) \
- V(IA32I64x2Eq) \
- V(IA32I64x2Ne) \
- V(IA32I64x2GtS) \
- V(IA32I64x2GeS) \
- V(IA32I64x2ExtMulLowI32x4S) \
- V(IA32I64x2ExtMulHighI32x4S) \
- V(IA32I64x2ExtMulLowI32x4U) \
- V(IA32I64x2ExtMulHighI32x4U) \
- V(IA32I64x2SConvertI32x4Low) \
- V(IA32I64x2SConvertI32x4High) \
- V(IA32I64x2UConvertI32x4Low) \
- V(IA32I64x2UConvertI32x4High) \
- V(IA32F32x4Splat) \
- V(IA32F32x4ExtractLane) \
- V(IA32Insertps) \
- V(IA32F32x4SConvertI32x4) \
- V(IA32F32x4UConvertI32x4) \
- V(IA32F32x4Abs) \
- V(IA32F32x4Neg) \
- V(IA32F32x4Sqrt) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(IA32F32x4Add) \
- V(IA32F32x4Sub) \
- V(IA32F32x4Mul) \
- V(IA32F32x4Div) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
- V(IA32F32x4Pmin) \
- V(IA32F32x4Pmax) \
- V(IA32F32x4Round) \
- V(IA32F32x4DemoteF64x2Zero) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(IA32I32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(IA32I32x4Shl) \
- V(IA32I32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(IA32I32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I32x4Abs) \
- V(IA32I32x4BitMask) \
- V(IA32I32x4DotI16x8S) \
- V(IA32I32x4ExtMulLowI16x8S) \
- V(IA32I32x4ExtMulHighI16x8S) \
- V(IA32I32x4ExtMulLowI16x8U) \
- V(IA32I32x4ExtMulHighI16x8U) \
- V(IA32I32x4ExtAddPairwiseI16x8S) \
- V(IA32I32x4ExtAddPairwiseI16x8U) \
- V(IA32I32x4TruncSatF64x2SZero) \
- V(IA32I32x4TruncSatF64x2UZero) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLaneS) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(IA32I16x8Shl) \
- V(IA32I16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSatS) \
- V(AVXI16x8AddSatS) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSatS) \
- V(AVXI16x8SubSatS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(IA32I16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSatU) \
- V(AVXI16x8AddSatU) \
- V(SSEI16x8SubSatU) \
- V(AVXI16x8SubSatU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I16x8RoundingAverageU) \
- V(IA32I16x8Abs) \
- V(IA32I16x8BitMask) \
- V(IA32I16x8ExtMulLowI8x16S) \
- V(IA32I16x8ExtMulHighI8x16S) \
- V(IA32I16x8ExtMulLowI8x16U) \
- V(IA32I16x8ExtMulHighI8x16U) \
- V(IA32I16x8ExtAddPairwiseI8x16S) \
- V(IA32I16x8ExtAddPairwiseI8x16U) \
- V(IA32I16x8Q15MulRSatS) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLaneS) \
- V(IA32Pinsrb) \
- V(IA32Pinsrw) \
- V(IA32Pinsrd) \
- V(IA32Pextrb) \
- V(IA32Pextrw) \
- V(IA32S128Store32Lane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(IA32I8x16Shl) \
- V(IA32I8x16ShrS) \
- V(IA32I8x16Add) \
- V(IA32I8x16AddSatS) \
- V(IA32I8x16Sub) \
- V(IA32I8x16SubSatS) \
- V(IA32I8x16MinS) \
- V(IA32I8x16MaxS) \
- V(IA32I8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(IA32I8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
- V(IA32I8x16AddSatU) \
- V(IA32I8x16SubSatU) \
- V(IA32I8x16ShrU) \
- V(IA32I8x16MinU) \
- V(IA32I8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32I8x16RoundingAverageU) \
- V(IA32I8x16Abs) \
- V(IA32I8x16BitMask) \
- V(IA32I8x16Popcnt) \
- V(IA32S128Const) \
- V(IA32S128Zero) \
- V(IA32S128AllOnes) \
- V(IA32S128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
- V(IA32S128Select) \
- V(IA32S128AndNot) \
- V(IA32I8x16Swizzle) \
- V(IA32I8x16Shuffle) \
- V(IA32S128Load8Splat) \
- V(IA32S128Load16Splat) \
- V(IA32S128Load32Splat) \
- V(IA32S128Load64Splat) \
- V(IA32S128Load8x8S) \
- V(IA32S128Load8x8U) \
- V(IA32S128Load16x4S) \
- V(IA32S128Load16x4U) \
- V(IA32S128Load32x2S) \
- V(IA32S128Load32x2U) \
- V(IA32S32x4Rotate) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S128AnyTrue) \
- V(IA32I64x2AllTrue) \
- V(IA32I32x4AllTrue) \
- V(IA32I16x8AllTrue) \
- V(IA32I8x16AllTrue) \
- V(IA32Word32AtomicPairLoad) \
- V(IA32Word32AtomicPairStore) \
- V(IA32Word32AtomicPairAdd) \
- V(IA32Word32AtomicPairSub) \
- V(IA32Word32AtomicPairAnd) \
- V(IA32Word32AtomicPairOr) \
- V(IA32Word32AtomicPairXor) \
- V(IA32Word32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Rol) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(IA32MFence) \
+ V(IA32LFence) \
+ V(IA32Float32Cmp) \
+ V(IA32Float32Sqrt) \
+ V(IA32Float32Round) \
+ V(IA32Float64Cmp) \
+ V(IA32Float64Mod) \
+ V(IA32Float32Max) \
+ V(IA32Float64Max) \
+ V(IA32Float32Min) \
+ V(IA32Float64Min) \
+ V(IA32Float64Sqrt) \
+ V(IA32Float64Round) \
+ V(IA32Float32ToFloat64) \
+ V(IA32Float64ToFloat32) \
+ V(IA32Float32ToInt32) \
+ V(IA32Float32ToUint32) \
+ V(IA32Float64ToInt32) \
+ V(IA32Float64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(IA32Uint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(IA32Uint32ToFloat64) \
+ V(IA32Float64ExtractLowWord32) \
+ V(IA32Float64ExtractHighWord32) \
+ V(IA32Float64InsertLowWord32) \
+ V(IA32Float64InsertHighWord32) \
+ V(IA32Float64LoadLowWord32) \
+ V(IA32Float64SilenceNaN) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32Movlps) \
+ V(IA32Movhps) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(IA32F64x2Sqrt) \
+ V(IA32F64x2Add) \
+ V(IA32F64x2Sub) \
+ V(IA32F64x2Mul) \
+ V(IA32F64x2Div) \
+ V(IA32F64x2Min) \
+ V(IA32F64x2Max) \
+ V(IA32F64x2Eq) \
+ V(IA32F64x2Ne) \
+ V(IA32F64x2Lt) \
+ V(IA32F64x2Le) \
+ V(IA32F64x2Pmin) \
+ V(IA32F64x2Pmax) \
+ V(IA32F64x2Round) \
+ V(IA32F64x2ConvertLowI32x4S) \
+ V(IA32F64x2ConvertLowI32x4U) \
+ V(IA32F64x2PromoteLowF32x4) \
+ V(IA32I64x2SplatI32Pair) \
+ V(IA32I64x2ReplaceLaneI32Pair) \
+ V(IA32I64x2Abs) \
+ V(IA32I64x2Neg) \
+ V(IA32I64x2Shl) \
+ V(IA32I64x2ShrS) \
+ V(IA32I64x2Add) \
+ V(IA32I64x2Sub) \
+ V(IA32I64x2Mul) \
+ V(IA32I64x2ShrU) \
+ V(IA32I64x2BitMask) \
+ V(IA32I64x2Eq) \
+ V(IA32I64x2Ne) \
+ V(IA32I64x2GtS) \
+ V(IA32I64x2GeS) \
+ V(IA32I64x2ExtMulLowI32x4S) \
+ V(IA32I64x2ExtMulHighI32x4S) \
+ V(IA32I64x2ExtMulLowI32x4U) \
+ V(IA32I64x2ExtMulHighI32x4U) \
+ V(IA32I64x2SConvertI32x4Low) \
+ V(IA32I64x2SConvertI32x4High) \
+ V(IA32I64x2UConvertI32x4Low) \
+ V(IA32I64x2UConvertI32x4High) \
+ V(IA32F32x4Splat) \
+ V(IA32F32x4ExtractLane) \
+ V(IA32Insertps) \
+ V(IA32F32x4SConvertI32x4) \
+ V(IA32F32x4UConvertI32x4) \
+ V(IA32F32x4Sqrt) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(IA32F32x4Add) \
+ V(IA32F32x4Sub) \
+ V(IA32F32x4Mul) \
+ V(IA32F32x4Div) \
+ V(IA32F32x4Min) \
+ V(IA32F32x4Max) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
+ V(IA32F32x4Pmin) \
+ V(IA32F32x4Pmax) \
+ V(IA32F32x4Round) \
+ V(IA32F32x4DemoteF64x2Zero) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(IA32I32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(IA32I32x4Shl) \
+ V(IA32I32x4ShrS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(IA32I32x4ShrU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I32x4Abs) \
+ V(IA32I32x4BitMask) \
+ V(IA32I32x4DotI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8S) \
+ V(IA32I32x4ExtMulHighI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8U) \
+ V(IA32I32x4ExtMulHighI16x8U) \
+ V(IA32I32x4ExtAddPairwiseI16x8S) \
+ V(IA32I32x4ExtAddPairwiseI16x8U) \
+ V(IA32I32x4TruncSatF64x2SZero) \
+ V(IA32I32x4TruncSatF64x2UZero) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLaneS) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(IA32I16x8Shl) \
+ V(IA32I16x8ShrS) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(IA32I16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(IA32I16x8ShrU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I16x8RoundingAverageU) \
+ V(IA32I16x8Abs) \
+ V(IA32I16x8BitMask) \
+ V(IA32I16x8ExtMulLowI8x16S) \
+ V(IA32I16x8ExtMulHighI8x16S) \
+ V(IA32I16x8ExtMulLowI8x16U) \
+ V(IA32I16x8ExtMulHighI8x16U) \
+ V(IA32I16x8ExtAddPairwiseI8x16S) \
+ V(IA32I16x8ExtAddPairwiseI8x16U) \
+ V(IA32I16x8Q15MulRSatS) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLaneS) \
+ V(IA32Pinsrb) \
+ V(IA32Pinsrw) \
+ V(IA32Pinsrd) \
+ V(IA32Pextrb) \
+ V(IA32Pextrw) \
+ V(IA32S128Store32Lane) \
+ V(IA32I8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(IA32I8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(IA32I8x16Add) \
+ V(IA32I8x16AddSatS) \
+ V(IA32I8x16Sub) \
+ V(IA32I8x16SubSatS) \
+ V(IA32I8x16MinS) \
+ V(IA32I8x16MaxS) \
+ V(IA32I8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(IA32I8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(IA32I8x16UConvertI16x8) \
+ V(IA32I8x16AddSatU) \
+ V(IA32I8x16SubSatU) \
+ V(IA32I8x16ShrU) \
+ V(IA32I8x16MinU) \
+ V(IA32I8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32I8x16RoundingAverageU) \
+ V(IA32I8x16Abs) \
+ V(IA32I8x16BitMask) \
+ V(IA32I8x16Popcnt) \
+ V(IA32S128Const) \
+ V(IA32S128Zero) \
+ V(IA32S128AllOnes) \
+ V(IA32S128Not) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
+ V(IA32S128Select) \
+ V(IA32S128AndNot) \
+ V(IA32I8x16Swizzle) \
+ V(IA32I8x16Shuffle) \
+ V(IA32S128Load8Splat) \
+ V(IA32S128Load16Splat) \
+ V(IA32S128Load32Splat) \
+ V(IA32S128Load64Splat) \
+ V(IA32S128Load8x8S) \
+ V(IA32S128Load8x8U) \
+ V(IA32S128Load16x4S) \
+ V(IA32S128Load16x4U) \
+ V(IA32S128Load32x2S) \
+ V(IA32S128Load32x2U) \
+ V(IA32S32x4Rotate) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S128AnyTrue) \
+ V(IA32I64x2AllTrue) \
+ V(IA32I32x4AllTrue) \
+ V(IA32I16x8AllTrue) \
+ V(IA32I8x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
V(IA32Word32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 278e7ea99b9..01e4f8faa83 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -48,57 +48,45 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Popcnt:
case kIA32Bswap:
case kIA32Lea:
- case kSSEFloat32Cmp:
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Mul:
- case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
- case kSSEFloat32Sqrt:
- case kSSEFloat32Round:
- case kSSEFloat64Cmp:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
- case kSSEFloat64Mul:
- case kSSEFloat64Div:
- case kSSEFloat64Mod:
- case kSSEFloat32Max:
- case kSSEFloat64Max:
- case kSSEFloat32Min:
- case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
- case kSSEFloat64Sqrt:
- case kSSEFloat64Round:
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
- case kSSEFloat32ToInt32:
- case kSSEFloat32ToUint32:
- case kSSEFloat64ToInt32:
- case kSSEFloat64ToUint32:
+ case kIA32Float32Cmp:
+ case kIA32Float32Sqrt:
+ case kIA32Float32Round:
+ case kIA32Float64Cmp:
+ case kIA32Float64Mod:
+ case kIA32Float32Max:
+ case kIA32Float64Max:
+ case kIA32Float32Min:
+ case kIA32Float64Min:
+ case kIA32Float64Sqrt:
+ case kIA32Float64Round:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
+ case kIA32Float32ToInt32:
+ case kIA32Float32ToUint32:
+ case kIA32Float64ToInt32:
+ case kIA32Float64ToUint32:
case kSSEInt32ToFloat32:
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
case kSSEInt32ToFloat64:
- case kSSEUint32ToFloat64:
- case kSSEFloat64ExtractLowWord32:
- case kSSEFloat64ExtractHighWord32:
- case kSSEFloat64InsertLowWord32:
- case kSSEFloat64InsertHighWord32:
- case kSSEFloat64LoadLowWord32:
- case kSSEFloat64SilenceNaN:
- case kAVXFloat32Add:
- case kAVXFloat32Sub:
- case kAVXFloat32Mul:
- case kAVXFloat32Div:
- case kAVXFloat64Add:
- case kAVXFloat64Sub:
- case kAVXFloat64Mul:
- case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kIA32Uint32ToFloat64:
+ case kIA32Float64ExtractLowWord32:
+ case kIA32Float64ExtractHighWord32:
+ case kIA32Float64InsertLowWord32:
+ case kIA32Float64InsertHighWord32:
+ case kIA32Float64LoadLowWord32:
+ case kIA32Float64SilenceNaN:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Mul:
+ case kFloat32Div:
+ case kFloat64Mul:
+ case kFloat64Div:
+ case kFloat64Abs:
+ case kFloat64Neg:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32F64x2Splat:
@@ -149,8 +137,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Insertps:
case kIA32F32x4SConvertI32x4:
case kIA32F32x4UConvertI32x4:
- case kIA32F32x4Abs:
- case kIA32F32x4Neg:
case kIA32F32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
@@ -158,18 +144,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Sub:
case kIA32F32x4Mul:
case kIA32F32x4Div:
- case kSSEF32x4Min:
- case kAVXF32x4Min:
- case kSSEF32x4Max:
- case kAVXF32x4Max:
- case kSSEF32x4Eq:
- case kAVXF32x4Eq:
- case kSSEF32x4Ne:
- case kAVXF32x4Ne:
- case kSSEF32x4Lt:
- case kAVXF32x4Lt:
- case kSSEF32x4Le:
- case kAVXF32x4Le:
+ case kIA32F32x4Min:
+ case kIA32F32x4Max:
+ case kIA32F32x4Eq:
+ case kIA32F32x4Ne:
+ case kIA32F32x4Lt:
+ case kIA32F32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
@@ -182,33 +162,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Neg:
case kIA32I32x4Shl:
case kIA32I32x4ShrS:
- case kSSEI32x4Add:
- case kAVXI32x4Add:
- case kSSEI32x4Sub:
- case kAVXI32x4Sub:
- case kSSEI32x4Mul:
- case kAVXI32x4Mul:
- case kSSEI32x4MinS:
- case kAVXI32x4MinS:
- case kSSEI32x4MaxS:
- case kAVXI32x4MaxS:
- case kSSEI32x4Eq:
- case kAVXI32x4Eq:
- case kSSEI32x4Ne:
- case kAVXI32x4Ne:
- case kSSEI32x4GtS:
- case kAVXI32x4GtS:
- case kSSEI32x4GeS:
- case kAVXI32x4GeS:
+ case kIA32I32x4Add:
+ case kIA32I32x4Sub:
+ case kIA32I32x4Mul:
+ case kIA32I32x4MinS:
+ case kIA32I32x4MaxS:
+ case kIA32I32x4Eq:
+ case kIA32I32x4Ne:
+ case kIA32I32x4GtS:
+ case kIA32I32x4GeS:
case kSSEI32x4UConvertF32x4:
case kAVXI32x4UConvertF32x4:
case kIA32I32x4UConvertI16x8Low:
case kIA32I32x4UConvertI16x8High:
case kIA32I32x4ShrU:
- case kSSEI32x4MinU:
- case kAVXI32x4MinU:
- case kSSEI32x4MaxU:
- case kAVXI32x4MaxU:
+ case kIA32I32x4MinU:
+ case kIA32I32x4MaxU:
case kSSEI32x4GtU:
case kAVXI32x4GtU:
case kSSEI32x4GeU:
@@ -231,43 +200,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8Neg:
case kIA32I16x8Shl:
case kIA32I16x8ShrS:
- case kSSEI16x8SConvertI32x4:
- case kAVXI16x8SConvertI32x4:
- case kSSEI16x8Add:
- case kAVXI16x8Add:
- case kSSEI16x8AddSatS:
- case kAVXI16x8AddSatS:
- case kSSEI16x8Sub:
- case kAVXI16x8Sub:
- case kSSEI16x8SubSatS:
- case kAVXI16x8SubSatS:
- case kSSEI16x8Mul:
- case kAVXI16x8Mul:
- case kSSEI16x8MinS:
- case kAVXI16x8MinS:
- case kSSEI16x8MaxS:
- case kAVXI16x8MaxS:
- case kSSEI16x8Eq:
- case kAVXI16x8Eq:
+ case kIA32I16x8SConvertI32x4:
+ case kIA32I16x8Add:
+ case kIA32I16x8AddSatS:
+ case kIA32I16x8Sub:
+ case kIA32I16x8SubSatS:
+ case kIA32I16x8Mul:
+ case kIA32I16x8MinS:
+ case kIA32I16x8MaxS:
+ case kIA32I16x8Eq:
case kSSEI16x8Ne:
case kAVXI16x8Ne:
- case kSSEI16x8GtS:
- case kAVXI16x8GtS:
+ case kIA32I16x8GtS:
case kSSEI16x8GeS:
case kAVXI16x8GeS:
case kIA32I16x8UConvertI8x16Low:
case kIA32I16x8UConvertI8x16High:
case kIA32I16x8ShrU:
- case kSSEI16x8UConvertI32x4:
- case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSatU:
- case kAVXI16x8AddSatU:
- case kSSEI16x8SubSatU:
- case kAVXI16x8SubSatU:
- case kSSEI16x8MinU:
- case kAVXI16x8MinU:
- case kSSEI16x8MaxU:
- case kAVXI16x8MaxU:
+ case kIA32I16x8UConvertI32x4:
+ case kIA32I16x8AddSatU:
+ case kIA32I16x8SubSatU:
+ case kIA32I16x8MinU:
+ case kIA32I16x8MaxU:
case kSSEI16x8GtU:
case kAVXI16x8GtU:
case kSSEI16x8GeU:
@@ -290,8 +244,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Pextrb:
case kIA32Pextrw:
case kIA32S128Store32Lane:
- case kSSEI8x16SConvertI16x8:
- case kAVXI8x16SConvertI16x8:
+ case kIA32I8x16SConvertI16x8:
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
@@ -307,8 +260,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
- case kSSEI8x16UConvertI16x8:
- case kAVXI8x16UConvertI16x8:
+ case kIA32I8x16UConvertI16x8:
case kIA32I8x16AddSatU:
case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
@@ -326,12 +278,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32S128Zero:
case kIA32S128AllOnes:
case kIA32S128Not:
- case kSSES128And:
- case kAVXS128And:
- case kSSES128Or:
- case kAVXS128Or:
- case kSSES128Xor:
- case kAVXS128Xor:
+ case kIA32S128And:
+ case kIA32S128Or:
+ case kIA32S128Xor:
case kIA32S128Select:
case kIA32S128AndNot:
case kIA32I8x16Swizzle:
@@ -423,7 +372,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairLoad:
return kIsLoadOperation;
- case kIA32Word32AtomicPairStore:
+ case kIA32Word32ReleasePairStore:
+ case kIA32Word32SeqCstPairStore:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
@@ -447,51 +397,51 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kSSEFloat64Mul:
+ case kFloat64Mul:
return 5;
case kIA32Imul:
case kIA32ImulHigh:
return 5;
- case kSSEFloat32Cmp:
- case kSSEFloat64Cmp:
+ case kIA32Float32Cmp:
+ case kIA32Float64Cmp:
return 9;
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
- case kSSEFloat64Max:
- case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Abs:
+ case kFloat32Neg:
+ case kIA32Float64Max:
+ case kIA32Float64Min:
+ case kFloat64Abs:
+ case kFloat64Neg:
return 5;
- case kSSEFloat32Mul:
+ case kFloat32Mul:
return 4;
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
return 6;
- case kSSEFloat32Round:
- case kSSEFloat64Round:
- case kSSEFloat32ToInt32:
- case kSSEFloat64ToInt32:
+ case kIA32Float32Round:
+ case kIA32Float64Round:
+ case kIA32Float32ToInt32:
+ case kIA32Float64ToInt32:
return 8;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
return 21;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
return 15;
case kIA32Idiv:
return 33;
case kIA32Udiv:
return 26;
- case kSSEFloat32Div:
+ case kFloat32Div:
return 35;
- case kSSEFloat64Div:
+ case kFloat64Div:
return 63;
- case kSSEFloat32Sqrt:
- case kSSEFloat64Sqrt:
+ case kIA32Float32Sqrt:
+ case kIA32Float64Sqrt:
return 25;
- case kSSEFloat64Mod:
+ case kIA32Float64Mod:
return 50;
case kArchTruncateDoubleToI:
return 9;
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f36fdb29356..8c2b58564ad 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -99,11 +99,14 @@ class IA32OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
return true;
+ case IrOpcode::kNumberConstant: {
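+        // Only a NumberConstant whose bit pattern is zero (+0.0) can be encoded as an immediate.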
+ const double value = OpParameter<double>(node->op());
+ return bit_cast<int64_t>(value) == 0;
+ }
case IrOpcode::kHeapConstant: {
// TODO(bmeurer): We must not dereference handles concurrently. If we
// really have to this here, then we need to find a way to put this
@@ -246,6 +249,41 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ return opcode;
+}
+
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* input = node->InputAt(0);
@@ -280,27 +318,27 @@ void VisitRR(InstructionSelector* selector, Node* node,
}
void VisitRROFloat(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 = g.Use(node->InputAt(1));
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1);
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input),
arraysize(temps), temps);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(input), arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
+ arraysize(temps), temps);
}
}
@@ -329,7 +367,7 @@ void VisitRROSimd(InstructionSelector* selector, Node* node,
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
- g.Use(node->InputAt(1)));
+ g.UseRegister(node->InputAt(1)));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
g.UseRegister(node->InputAt(1)));
@@ -389,14 +427,28 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
}
}
-void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitI8x16Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ InstructionOperand output = CpuFeatures::IsSupported(AVX)
+ ? g.UseRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ if (opcode == kIA32I8x16ShrS) {
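+      // An immediate arithmetic shift needs no temporaries; Shl/ShrU need a GP temp to build the byte mask.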
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
+ }
+ } else {
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ selector->Emit(opcode, output, operand0, operand1, arraysize(temps), temps);
+ }
}
} // namespace
@@ -409,9 +461,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -521,72 +573,110 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(!load_rep.IsMapWord());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
- ArchOpcode opcode;
- switch (load_rep.representation()) {
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
+ return kIA32Movss;
case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
+ return kIA32Movsd;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
- break;
+ return kIA32Movb;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
- break;
+ return kIA32Movw;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
+ return kIA32Movl;
case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
+ return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
- case MachineRepresentation::kMapWord:
UNREACHABLE();
}
+}
- IA32OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
+ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicExchangeInt8;
+ case MachineRepresentation::kWord16:
+ return kAtomicExchangeInt16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicExchangeWord32;
+ default:
+ UNREACHABLE();
}
- Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
-void InstructionSelector::VisitStore(Node* node) {
- IA32OperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
@@ -603,48 +693,23 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
+ temps);
+ } else if (is_seqcst) {
+ VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
} else {
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kIA32Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kIA32Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- }
+ // Release and non-atomic stores emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
+ } else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -655,13 +720,20 @@ void InstructionSelector::VisitStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
+ GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
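Aside (illustrative, not part of the patch): VisitStoreCommon above implements the C++11-to-x86 store mapping its comments cite -- release and plain stores need only a MOV, while a seq_cst store is lowered to an atomic exchange. A minimal sketch of that split at the source level, assuming a 32-bit x86 target; the names below are invented for the example:

#include <atomic>
#include <cstdint>

std::atomic<int32_t> g_cell{0};

void release_store(int32_t v) {
  // Release (and relaxed) stores typically compile to a single MOV on IA-32,
  // matching the non-seqcst branch of VisitStoreCommon.
  g_cell.store(v, std::memory_order_release);
}

void seq_cst_store(int32_t v) {
  // Seq_cst stores typically compile to XCHG (or MOV + MFENCE), matching the
  // GetSeqCstStoreOpcode / VisitAtomicExchange branch.
  g_cell.store(v, std::memory_order_seq_cst);
}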
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1057,80 +1129,82 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
-#define RO_OP_LIST(V) \
- V(Word32Clz, kIA32Lzcnt) \
- V(Word32Ctz, kIA32Tzcnt) \
- V(Word32Popcnt, kIA32Popcnt) \
- V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
- V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
- V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
- V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
- V(BitcastFloat32ToInt32, kIA32BitcastFI) \
- V(BitcastInt32ToFloat32, kIA32BitcastIF) \
- V(Float32Sqrt, kSSEFloat32Sqrt) \
- V(Float64Sqrt, kSSEFloat64Sqrt) \
- V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
- V(SignExtendWord8ToInt32, kIA32Movsxbl) \
- V(SignExtendWord16ToInt32, kIA32Movsxwl) \
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kIA32Float32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(TruncateFloat32ToInt32, kIA32Float32ToInt32) \
+ V(ChangeFloat64ToInt32, kIA32Float64ToInt32) \
+ V(TruncateFloat64ToFloat32, kIA32Float64ToFloat32) \
+ V(RoundFloat64ToInt32, kIA32Float64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kIA32Float32Sqrt) \
+ V(Float64Sqrt, kIA32Float64Sqrt) \
+ V(Float64ExtractLowWord32, kIA32Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kIA32Float64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kIA32Movsxbl) \
+ V(SignExtendWord16ToInt32, kIA32Movsxwl) \
V(F64x2Sqrt, kIA32F64x2Sqrt)
-#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)
-
-#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)
-
-#define RR_OP_LIST(V) \
- V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
- V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
- V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
- V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
- V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
- V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
- V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
- V(Float32RoundTiesEven, \
- kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, \
- kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
- V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
- V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
- V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
- V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
- V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
- V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
- V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
+#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kIA32Uint32ToFloat64)
+
+#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
+ V(TruncateFloat32ToUint32, kIA32Float32ToUint32) \
+ V(ChangeFloat64ToUint32, kIA32Float64ToUint32) \
+ V(TruncateFloat64ToUint32, kIA32Float64ToUint32)
+
+#define RR_OP_LIST(V) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(Float32RoundDown, kIA32Float32Round | MiscField::encode(kRoundDown)) \
+ V(Float64RoundDown, kIA32Float64Round | MiscField::encode(kRoundDown)) \
+ V(Float32RoundUp, kIA32Float32Round | MiscField::encode(kRoundUp)) \
+ V(Float64RoundUp, kIA32Float64Round | MiscField::encode(kRoundUp)) \
+ V(Float32RoundTruncate, kIA32Float32Round | MiscField::encode(kRoundToZero)) \
+ V(Float64RoundTruncate, kIA32Float64Round | MiscField::encode(kRoundToZero)) \
+ V(Float32RoundTiesEven, \
+ kIA32Float32Round | MiscField::encode(kRoundToNearest)) \
+ V(Float64RoundTiesEven, \
+ kIA32Float64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
-#define RRO_FLOAT_OP_LIST(V) \
- V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
- V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
- V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
- V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
- V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
- V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
- V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
- V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
- V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
- V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
- V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
- V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
- V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
- V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
-
-#define FLOAT_UNOP_LIST(V) \
- V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
- V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
- V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+#define RRO_FLOAT_OP_LIST(V) \
+ V(Float32Add, kFloat32Add) \
+ V(Float64Add, kFloat64Add) \
+ V(Float32Sub, kFloat32Sub) \
+ V(Float64Sub, kFloat64Sub) \
+ V(Float32Mul, kFloat32Mul) \
+ V(Float64Mul, kFloat64Mul) \
+ V(Float32Div, kFloat32Div) \
+ V(Float64Div, kFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le)
+
+#define FLOAT_UNOP_LIST(V) \
+ V(Float32Abs, kFloat32Abs) \
+ V(Float64Abs, kFloat64Abs) \
+ V(Float32Neg, kFloat32Neg) \
+ V(Float64Neg, kFloat64Neg) \
+ V(F32x4Abs, kFloat32Abs) \
+ V(F32x4Neg, kFloat32Neg) \
+ V(F64x2Abs, kFloat64Abs) \
+ V(F64x2Neg, kFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1164,17 +1238,17 @@ RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
-#define RRO_FLOAT_VISITOR(Name, avx, sse) \
+#define RRO_FLOAT_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRROFloat(this, node, avx, sse); \
+ VisitRROFloat(this, node, opcode); \
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
#undef RRO_FLOAT_OP_LIST
-#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+#define FLOAT_UNOP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitFloatUnop(this, node, node->InputAt(0), opcode); \
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
@@ -1281,14 +1355,14 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ Emit(kIA32Uint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
arraysize(temps), temps);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister()};
- Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1296,7 +1370,7 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
void InstructionSelector::VisitFloat32Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1304,7 +1378,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
void InstructionSelector::VisitFloat64Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1312,7 +1386,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
void InstructionSelector::VisitFloat32Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1320,7 +1394,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
void InstructionSelector::VisitFloat64Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1556,7 +1630,7 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float32Cmp, right, left, cont, false);
}
// Shared routine for multiple float64 compare operations (inputs commuted).
@@ -1564,7 +1638,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float64Cmp, right, left, cont, false);
}
// Shared routine for multiple word compare operations.
@@ -1617,29 +1691,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, MachineRepresentation rep) {
- IA32OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- AddressingMode addressing_mode;
- InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
- ? g.UseFixed(value, edx)
- : g.UseUniqueRegister(value);
- InstructionOperand inputs[] = {
- value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- (rep == MachineRepresentation::kWord8)
- // Using DefineSameAsFirst requires the register to be unallocated.
- ? g.DefineAsFixed(node, edx)
- : g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
-}
-
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
AddressingMode addressing_mode;
@@ -1922,10 +1973,10 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Float64Matcher mleft(left);
if (mleft.HasResolvedValue() &&
(bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
- Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ Emit(kIA32Float64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
- Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
@@ -1933,13 +1984,13 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64SilenceNaN, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -1949,32 +2000,25 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
+ load_rep.representation() == MachineRepresentation::kWord32 ||
+ load_rep.representation() == MachineRepresentation::kTaggedSigned ||
+ load_rep.representation() == MachineRepresentation::kTaggedPointer ||
+ load_rep.representation() == MachineRepresentation::kTagged);
USE(load_rep);
- VisitLoad(node);
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- IA32OperandGenerator g(this);
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -1982,15 +2026,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2007,15 +2051,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2053,12 +2097,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
VisitAtomicBinOp(this, node, opcode, type.representation());
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2068,6 +2111,8 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ // Both acquire and sequentially consistent loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
AddressingMode mode;
Node* base = node->InputAt(0);
@@ -2079,10 +2124,9 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
g.GetEffectiveIndexOperand(index, &mode)};
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
g.DefineAsRegister(projection1)};
- Emit(code, 2, outputs, 2, inputs, 1, temps);
+ Emit(code, 2, outputs, 2, inputs);
} else if (projection0 || projection1) {
// Only one word is needed, so it's enough to load just that.
ArchOpcode opcode = kIA32Movl;
@@ -2103,25 +2147,45 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ // Release pair stores emit a MOVQ via a double register, and sequentially
+ // consistent stores emit CMPXCHG8B.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
- g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- // Allocating temp registers here as stores are performed using an atomic
- // exchange, the output of which is stored in edx:eax, which should be saved
- // and restored at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- InstructionCode code =
- kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
+ if (order == AtomicMemoryOrder::kAcqRel) {
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value),
+ g.UseUniqueRegisterOrSlotOrConstant(value_high),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ };
+ InstructionCode code = kIA32Word32ReleasePairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs);
+ } else {
+ DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ // Allocating temp registers here as stores are performed using an atomic
+ // exchange, the output of which is stored in edx:eax, which should be saved
+ // and restored at the end of the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
+ InstructionCode code = kIA32Word32SeqCstPairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ }
}
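Aside (illustrative, not part of the patch; names are invented for the example): the two branches above split 64-bit pair stores by memory order -- one 8-byte move for acq_rel versus CMPXCHG8B for seq_cst -- because IA-32 has no 8-byte XCHG. A minimal sketch of the same split at the source level, assuming an i686 target with SSE2:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> g_pair{0};

void release_pair_store(uint64_t v) {
  // Commonly lowered to one 8-byte move through an XMM register (MOVQ),
  // analogous to kIA32Word32ReleasePairStore.
  g_pair.store(v, std::memory_order_release);
}

void seq_cst_pair_store(uint64_t v) {
  // Commonly lowered to a LOCK CMPXCHG8B loop, analogous to
  // kIA32Word32SeqCstPairStore.
  g_pair.store(v, std::memory_order_seq_cst);
}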
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
@@ -2191,62 +2255,59 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16)
#define SIMD_BINOP_LIST(V) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GtU) \
V(I32x4GeU) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
V(I16x8Ne) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
- V(I8x16SConvertI16x8) \
V(I8x16Ne) \
V(I8x16GeS) \
V(I8x16GtU) \
- V(I8x16GeU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Min) \
+ V(F32x4Max) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
V(I64x2Ne) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
V(I32x4DotI16x8S) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16AddSatS) \
@@ -2260,7 +2321,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
- V(I8x16RoundingAverageU)
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
// These opcodes require all inputs to be registers because the codegen is
// simpler with all registers.
@@ -2281,10 +2347,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_UNOP_LIST(V) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4DemoteF64x2Zero) \
- V(F32x4Abs) \
- V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4RecipApprox) \
@@ -2462,7 +2525,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4SConvertF32x4);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4SConvertF32x4, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
@@ -2625,26 +2693,6 @@ SIMD_BINOP_RRR(VISIT_SIMD_BINOP_RRR)
#undef VISIT_SIMD_BINOP_RRR
#undef SIMD_BINOP_RRR
-// TODO(v8:9198): SSE requires operand1 to be a register as we don't have memory
-// alignment yet. For AVX, memory operands are fine, but can have performance
-// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
-// chapter 14.9
-void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
- ArchOpcode sse_opcode) {
- IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
- if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- } else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- }
-}
-
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -2652,43 +2700,16 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
-}
-
void InstructionSelector::VisitI8x16Shl(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16Shl, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16Shl);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16Shl);
}
void InstructionSelector::VisitI8x16ShrS(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- this->Emit(kIA32I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)));
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrS);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrS);
}
void InstructionSelector::VisitI8x16ShrU(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16ShrU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrU);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrU);
}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
@@ -3153,6 +3174,25 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
VisitRRSimd(this, node, kIA32I64x2Abs, kIA32I64x2Abs);
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionCode code = kIA32F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ // Trap handler is not supported on IA32.
+ DCHECK_NE(m.ResolvedValue().kind, MemoryAccessKind::kProtected);
+ // LoadTransforms cannot be eliminated, so they are visited even if
+ // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
diff --git a/chromium/v8/src/compiler/backend/instruction-codes.h b/chromium/v8/src/compiler/backend/instruction-codes.h
index 31d669813e2..56d4d960bd3 100644
--- a/chromium/v8/src/compiler/backend/instruction-codes.h
+++ b/chromium/v8/src/compiler/backend/instruction-codes.h
@@ -17,6 +17,8 @@
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
@@ -30,6 +32,7 @@
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
#include "src/base/bit-field.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/compiler/write-barrier-kind.h"
namespace v8 {
@@ -89,7 +92,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchBinarySearchSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchAbortCSAAssert) \
+ V(ArchAbortCSADcheck) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
@@ -99,53 +102,53 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
+ V(ArchAtomicStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32) \
+ V(AtomicExchangeInt8) \
+ V(AtomicExchangeUint8) \
+ V(AtomicExchangeInt16) \
+ V(AtomicExchangeUint16) \
+ V(AtomicExchangeWord32) \
+ V(AtomicCompareExchangeInt8) \
+ V(AtomicCompareExchangeUint8) \
+ V(AtomicCompareExchangeInt16) \
+ V(AtomicCompareExchangeUint16) \
+ V(AtomicCompareExchangeWord32) \
+ V(AtomicAddInt8) \
+ V(AtomicAddUint8) \
+ V(AtomicAddInt16) \
+ V(AtomicAddUint16) \
+ V(AtomicAddWord32) \
+ V(AtomicSubInt8) \
+ V(AtomicSubUint8) \
+ V(AtomicSubInt16) \
+ V(AtomicSubUint16) \
+ V(AtomicSubWord32) \
+ V(AtomicAndInt8) \
+ V(AtomicAndUint8) \
+ V(AtomicAndInt16) \
+ V(AtomicAndUint16) \
+ V(AtomicAndWord32) \
+ V(AtomicOrInt8) \
+ V(AtomicOrUint8) \
+ V(AtomicOrInt16) \
+ V(AtomicOrUint16) \
+ V(AtomicOrWord32) \
+ V(AtomicXorInt8) \
+ V(AtomicXorUint8) \
+ V(AtomicXorInt16) \
+ V(AtomicXorUint16) \
+ V(AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -208,12 +211,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_branch_and_poison = 2,
- kFlags_deoptimize = 3,
- kFlags_deoptimize_and_poison = 4,
- kFlags_set = 5,
- kFlags_trap = 6,
- kFlags_select = 7,
+ kFlags_deoptimize = 2,
+ kFlags_set = 3,
+ kFlags_trap = 4,
+ kFlags_select = 5,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -262,9 +263,20 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
kMemoryAccessProtected = 1,
- kMemoryAccessPoisoned = 2
};
+enum class AtomicWidth { kWord32, kWord64 };
+
+inline size_t AtomicWidthSize(AtomicWidth width) {
+ switch (width) {
+ case AtomicWidth::kWord32:
+ return 4;
+ case AtomicWidth::kWord64:
+ return 8;
+ }
+ UNREACHABLE();
+}
+
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
@@ -279,15 +291,74 @@ using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
"All opcodes must fit in the 9-bit ArchOpcodeField.");
using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
+static_assert(
+ AddressingModeField::is_valid(kLastAddressingMode),
+ "All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
-using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
-using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+using MiscField = base::BitField<int, 22, 10>;
+
+// {MiscField} is used for a variety of things, depending on the opcode.
+// TODO(turbofan): There should be an abstraction that ensures safe encoding and
+// decoding. {HasMemoryAccessMode} and its uses are a small step in that
+// direction.
+
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
-using MiscField = base::BitField<int, 22, 10>;
+// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
+// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
+// InstructionSelector) is not yet guarded. There are in fact instructions for
+// which InstructionSelector does set a MemoryAccessMode but CodeGenerator
+// doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
+// scary. {HasMemoryAccessMode} does not include these instructions, so they can
+// be easily found by guarding encoding.
+inline bool HasMemoryAccessMode(ArchOpcode opcode) {
+ switch (opcode) {
+#define CASE(Name) \
+ case k##Name: \
+ return true;
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
+#undef CASE
+ default:
+ return false;
+ }
+}
+
+using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
+using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+
+// AtomicWidthField overlaps with MiscField and is used for the various Atomic
+// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
+// architectures are assumed to be 32bit wide.
+using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+
+// AtomicMemoryOrderField overlaps with MiscField and is used for the various
+// Atomic opcodes. This field is not used on all architectures. It is used on
+// architectures where the codegen for kSeqCst and kAcqRel differ only by
+// emitting fences.
+using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
+using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
+
+// ParamField and FPParamField overlap with MiscField, as the latter is never
+// used for Call instructions. These 2 fields represent the general purpose
+// and floating point parameter counts of a direct call into C and are given 5
+// bits each, which allow storing a number up to the current maximum parameter
+// count, which is 20 (see kMaxCParameters defined in macro-assembler.h).
+using ParamField = base::BitField<int, 22, 5>;
+using FPParamField = base::BitField<int, 27, 5>;
+
+// This static assertion serves as an early warning if we are about to exhaust
+// the available opcode space. If we are about to exhaust it, we should start
+// looking into options to compress some opcodes (see
+// https://crbug.com/v8/12093) before we fully run out of available opcodes.
+// Otherwise we risk being unable to land an important security fix or merge
+// back fixes that add new opcodes.
+// It is OK to temporarily reduce the required slack if we have a tracking bug
+// to reduce the number of used opcodes again.
+static_assert(ArchOpcodeField::kMax - kLastArchOpcode >= 16,
+ "We are running close to the number of available opcodes.");
} // namespace compiler
} // namespace internal
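Aside (illustrative, not part of the patch): the field classes above all follow V8's base::BitField pattern -- each field owns a (shift, size) slice of the 32-bit InstructionCode, the instruction selector ORs encoded fields together, and the code generator decodes them back out. A minimal self-contained sketch of that scheme, using the field positions shown in the hunk but placeholder enum values rather than V8's real ones:

#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t code) {
    return static_cast<T>((code & kMask) >> kShift);
  }
};

enum ArchOpcode : uint32_t { kIA32Movl = 7 };      // placeholder value
enum AddressingMode : uint32_t { kMode_MRI = 3 };  // placeholder value

using ArchOpcodeField = BitField<ArchOpcode, 0, 9>;
using AddressingModeField = BitField<AddressingMode, 9, 5>;
using MiscField = BitField<int, 22, 10>;

int main() {
  // Pack an opcode, an addressing mode and a misc payload into one word,
  // the way the instruction selector builds an InstructionCode.
  uint32_t code = ArchOpcodeField::encode(kIA32Movl) |
                  AddressingModeField::encode(kMode_MRI) |
                  MiscField::encode(5);
  // Decode the fields back out, the way the code generator consumes them.
  std::printf("opcode=%u mode=%u misc=%d\n",
              static_cast<uint32_t>(ArchOpcodeField::decode(code)),
              static_cast<uint32_t>(AddressingModeField::decode(code)),
              MiscField::decode(code));
  return 0;
}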
diff --git a/chromium/v8/src/compiler/backend/instruction-scheduler.cc b/chromium/v8/src/compiler/backend/instruction-scheduler.cc
index c46d263bae2..3d0be78262e 100644
--- a/chromium/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/chromium/v8/src/compiler/backend/instruction-scheduler.cc
@@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
- DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
@@ -168,12 +167,16 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
- } else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
+ } else if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
// Ensure that deopts or traps are not reordered with respect to
// side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
+ }
+
+ // Update last deoptimization or trap point.
+ if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
last_deopt_or_trap_ = new_node;
}
@@ -298,11 +301,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// effects.
return kIsLoadOperation;
- case kArchWordPoisonOnSpeculation:
- // While poisoning operations have no side effect, they must not be
- // reordered relative to branches.
- return kHasSideEffect;
-
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchTailCallCodeObject:
@@ -310,7 +308,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
#endif // V8_ENABLE_WEBASSEMBLY
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return kHasSideEffect;
case kArchDebugBreak:
@@ -334,55 +332,56 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsBarrier;
case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier:
return kHasSideEffect;
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return kIsLoadOperation;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return kHasSideEffect;
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16:
- case kWord32AtomicExchangeWord32:
- case kWord32AtomicCompareExchangeInt8:
- case kWord32AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeInt16:
- case kWord32AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeWord32:
- case kWord32AtomicAddInt8:
- case kWord32AtomicAddUint8:
- case kWord32AtomicAddInt16:
- case kWord32AtomicAddUint16:
- case kWord32AtomicAddWord32:
- case kWord32AtomicSubInt8:
- case kWord32AtomicSubUint8:
- case kWord32AtomicSubInt16:
- case kWord32AtomicSubUint16:
- case kWord32AtomicSubWord32:
- case kWord32AtomicAndInt8:
- case kWord32AtomicAndUint8:
- case kWord32AtomicAndInt16:
- case kWord32AtomicAndUint16:
- case kWord32AtomicAndWord32:
- case kWord32AtomicOrInt8:
- case kWord32AtomicOrUint8:
- case kWord32AtomicOrInt16:
- case kWord32AtomicOrUint16:
- case kWord32AtomicOrWord32:
- case kWord32AtomicXorInt8:
- case kWord32AtomicXorUint8:
- case kWord32AtomicXorInt16:
- case kWord32AtomicXorUint16:
- case kWord32AtomicXorWord32:
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/chromium/v8/src/compiler/backend/instruction-scheduler.h b/chromium/v8/src/compiler/backend/instruction-scheduler.h
index c22190bd50f..d4c08a033d1 100644
--- a/chromium/v8/src/compiler/backend/instruction-scheduler.h
+++ b/chromium/v8/src/compiler/backend/instruction-scheduler.h
@@ -169,6 +169,12 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
+ bool CanTrap(const Instruction* instr) const {
+ return instr->IsTrap() ||
+ (instr->HasMemoryAccessMode() &&
+ instr->memory_access_mode() == kMemoryAccessProtected);
+ }
+
// The scheduler will not move the following instructions before the last
// deopt/trap check:
// * loads (this is conservative)
@@ -184,7 +190,7 @@ class InstructionScheduler final : public ZoneObject {
// trap point we encountered.
bool DependsOnDeoptOrTrap(const Instruction* instr) const {
return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
- instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
+ CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.cc b/chromium/v8/src/compiler/backend/instruction-selector.cc
index f279ea15900..beb716abbe3 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.cc
+++ b/chromium/v8/src/compiler/backend/instruction-selector.cc
@@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector(
size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
- PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
+ EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector(
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
state_values_cache_(zone),
- poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
@@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
- // Argument 1 is used for poison-alias index (encoded in a word-sized
- // immediate. This an index of the operand that aliases with poison register
- // or -1 if there is no aliasing.
- buffer->instruction_args.push_back(g.TempImmediate(-1));
- const size_t poison_alias_index = 1;
- DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
-
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
- // arg 2 : deoptimization id.
- // arg 3 - arg (n + 2) : value inputs to the frame state.
+ // arg 1 : deoptimization id.
+ // arg 2 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
- DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
- // If we do load poisoning and the linkage uses the poisoning register,
- // then we request the input in memory location, and during code
- // generation, we move the input to the register.
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
- unallocated.HasFixedRegisterPolicy()) {
- int reg = unallocated.fixed_register_index();
- if (Register::from_code(reg) == kSpeculationPoisonRegister) {
- buffer->instruction_args[poison_alias_index] = g.TempImmediate(
- static_cast<int32_t>(buffer->instruction_args.size()));
- op = g.UseRegisterOrSlotOrConstant(*iter);
- }
- }
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- frame_state_entries - 1);
+ frame_state_entries);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1215,9 +1195,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCall ||
- node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore ||
- node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1474,8 +1452,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kAbortCSAAssert:
- VisitAbortCSAAssert(node);
+ case IrOpcode::kAbortCSADcheck:
+ VisitAbortCSADcheck(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -1509,11 +1487,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
- case IrOpcode::kPoisonedLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
- MarkAsRepresentation(type.representation(), node);
- return VisitPoisonedLoad(node);
- }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1850,12 +1823,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
- case IrOpcode::kTaggedPoisonOnSpeculation:
- return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
- case IrOpcode::kWord32PoisonOnSpeculation:
- return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
- case IrOpcode::kWord64PoisonOnSpeculation:
- return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
@@ -1900,12 +1867,14 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
@@ -2389,30 +2358,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- OperandGenerator g(this);
- Node* input_node = NodeProperties::GetValueInput(node, 0);
- InstructionOperand input = g.UseRegister(input_node);
- InstructionOperand output = g.DefineSameAsFirst(node);
- Emit(kArchWordPoisonOnSpeculation, output, input);
- } else {
- EmitIdentity(node);
- }
-}
-
-void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
@@ -2766,7 +2711,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2792,7 +2738,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
- // !V8_TARGET_ARCH_RISCV64
+ // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2806,11 +2752,12 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
+ // !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -2837,7 +2784,7 @@ namespace {
LinkageLocation ExceptionLocation() {
return LinkageLocation::ForRegister(kReturnRegister0.code(),
- MachineType::IntPtr());
+ MachineType::TaggedPointer());
}
constexpr InstructionCode EncodeCallDescriptorFlags(
@@ -2967,16 +2914,20 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
InstructionCode opcode;
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
- int misc_field = static_cast<int>(call_descriptor->ParameterCount());
+ int gp_param_count =
+ static_cast<int>(call_descriptor->GPParameterCount());
+ int fp_param_count =
+ static_cast<int>(call_descriptor->FPParameterCount());
#if ABI_USES_FUNCTION_DESCRIPTORS
- // Highest misc_field bit is used on AIX to indicate if a CFunction call
- // has function descriptor or not.
- STATIC_ASSERT(MiscField::kSize == kHasFunctionDescriptorBitShift + 1);
+ // Highest fp_param_count bit is used on AIX to indicate if a CFunction
+ // call has function descriptor or not.
+ STATIC_ASSERT(FPParamField::kSize == kHasFunctionDescriptorBitShift + 1);
if (!call_descriptor->NoFunctionDescriptor()) {
- misc_field |= 1 << kHasFunctionDescriptorBitShift;
+ fp_param_count |= 1 << kHasFunctionDescriptorBitShift;
}
#endif
- opcode = kArchCallCFunction | MiscField::encode(misc_field);
+ opcode = kArchCallCFunction | ParamField::encode(gp_param_count) |
+ FPParamField::encode(fp_param_count);
break;
}
case CallDescriptor::kCallCodeObject:
@@ -3104,45 +3055,24 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
- if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
- FlagsContinuation cont =
- FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- } else {
- FlagsContinuation cont =
- FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- }
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSelect(Node* node) {
@@ -3186,17 +3116,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
- if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
+ dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
+ VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
@@ -3409,18 +3332,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-// static
-bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return safety_check != IsSafetyCheck::kNoSafetyCheck;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
- }
- UNREACHABLE();
-}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.h b/chromium/v8/src/compiler/backend/instruction-selector.h
index 11a329d1d6e..b33de8e8569 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.h
+++ b/chromium/v8/src/compiler/backend/instruction-selector.h
@@ -54,13 +54,6 @@ class FlagsContinuation final {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
- static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block) {
- return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
- false_block);
- }
-
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
@@ -71,16 +64,6 @@ class FlagsContinuation final {
extra_args_count);
}
- // Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimizeAndPoison(
- FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
- return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
- reason, node_id, feedback, frame_state, extra_args,
- extra_args_count);
- }
-
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
@@ -98,16 +81,8 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const {
- return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
- }
- bool IsDeoptimize() const {
- return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsPoisoned() const {
- return mode_ == kFlags_branch_and_poison ||
- mode_ == kFlags_deoptimize_and_poison;
- }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
bool IsSelect() const { return mode_ == kFlags_select; }
@@ -226,7 +201,7 @@ class FlagsContinuation final {
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
- DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK(mode == kFlags_branch);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
@@ -245,7 +220,7 @@ class FlagsContinuation final {
frame_state_or_result_(frame_state),
extra_args_(extra_args),
extra_args_count_(extra_args_count) {
- DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
+ DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
@@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
- bool NeedsPoisoning(IsSafetyCheck safety_check) const;
-
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
- void EmitWordPoisonOnSpeculation(Node* node);
-
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
@@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FrameStateInput::Equal>
state_values_cache_;
- PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
diff --git a/chromium/v8/src/compiler/backend/instruction.cc b/chromium/v8/src/compiler/backend/instruction.cc
index 63ca78e0600..a5c008bad52 100644
--- a/chromium/v8/src/compiler/backend/instruction.cc
+++ b/chromium/v8/src/compiler/backend/instruction.cc
@@ -7,7 +7,9 @@
#include <cstddef>
#include <iomanip>
+#include "src/codegen/aligned-slot-allocator.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/source-position.h"
#include "src/compiler/common-operator.h"
@@ -77,10 +79,15 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
- if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
- !other.IsFPLocationOperand())
+ const bool kComplexFPAliasing = !kSimpleFPAliasing &&
+ this->IsFPLocationOperand() &&
+ other.IsFPLocationOperand();
+ const bool kComplexS128SlotAliasing =
+ (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
+ (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
+ if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other);
- // Aliasing is complex and both operands are fp locations.
+ }
const LocationOperand& loc = *LocationOperand::cast(this);
const LocationOperand& other_loc = LocationOperand::cast(other);
LocationOperand::LocationKind kind = loc.location_kind();
@@ -88,22 +95,29 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
if (kind != other_kind) return false;
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation();
- if (rep == other_rep) return EqualsCanonicalized(other);
- if (kind == LocationOperand::REGISTER) {
- // FP register-register interference.
- return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
- other_loc.register_code());
+
+ if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
+ if (rep == other_rep) return EqualsCanonicalized(other);
+ if (kind == LocationOperand::REGISTER) {
+ // FP register-register interference.
+ return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+ other_loc.register_code());
+ }
}
- // FP slot-slot interference. Slots of different FP reps can alias because
- // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
+
+ // Complex multi-slot operand interference:
+ // - slots of different FP reps can alias because the gap resolver may break a
+ // move into 2 or 4 equivalent smaller moves,
+ // - stack layout can be rearranged for tail calls
DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
int index_hi = loc.index();
int index_lo =
- index_hi - (1 << ElementSizeLog2Of(rep)) / kSystemPointerSize + 1;
+ index_hi -
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(rep)) + 1;
int other_index_hi = other_loc.index();
int other_index_lo =
other_index_hi -
- (1 << ElementSizeLog2Of(other_rep)) / kSystemPointerSize + 1;
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(other_rep)) + 1;
return other_index_hi >= index_lo && index_hi >= other_index_lo;
}
@@ -410,12 +424,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
- case kFlags_branch_and_poison:
- return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
- case kFlags_deoptimize_and_poison:
- return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
diff --git a/chromium/v8/src/compiler/backend/instruction.h b/chromium/v8/src/compiler/backend/instruction.h
index 204683c9735..7372a5160dd 100644
--- a/chromium/v8/src/compiler/backend/instruction.h
+++ b/chromium/v8/src/compiler/backend/instruction.h
@@ -882,6 +882,13 @@ class V8_EXPORT_PRIVATE Instruction final {
return FlagsConditionField::decode(opcode());
}
int misc() const { return MiscField::decode(opcode()); }
+ bool HasMemoryAccessMode() const {
+ return compiler::HasMemoryAccessMode(arch_opcode());
+ }
+ MemoryAccessMode memory_access_mode() const {
+ DCHECK(HasMemoryAccessMode());
+ return AccessModeField::decode(opcode());
+ }
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
@@ -935,8 +942,7 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
bool IsTrap() const {
diff --git a/chromium/v8/src/compiler/backend/jump-threading.cc b/chromium/v8/src/compiler/backend/jump-threading.cc
index e91b7e17d2b..258d05955e5 100644
--- a/chromium/v8/src/compiler/backend/jump-threading.cc
+++ b/chromium/v8/src/compiler/backend/jump-threading.cc
@@ -55,17 +55,6 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
-bool IsBlockWithBranchPoisoning(InstructionSequence* code,
- InstructionBlock* block) {
- if (block->PredecessorCount() != 1) return false;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return false;
- Instruction* instr = code->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- return mode == kFlags_branch_and_poison;
-}
-
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
@@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
RpoNumber fw = block->rpo_number();
- if (!IsBlockWithBranchPoisoning(code, block)) {
- bool fallthru = true;
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start || !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
- }
- fallthru = false;
- } else if (instr->IsRet()) {
- TRACE(" ret\n");
- if (fallthru) {
- CHECK_IMPLIES(block->must_construct_frame(),
- block->must_deconstruct_frame());
- // Only handle returns with immediate/constant operands, since
- // they must always be the same for all returns in a function.
- // Dynamic return values might use different registers at
- // different return sites and therefore cannot be shared.
- if (instr->InputAt(0)->IsImmediate()) {
- int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
- ->inline_int32_value();
- // Instructions can be shared only for blocks that share
- // the same |must_deconstruct_frame| attribute.
- if (block->must_deconstruct_frame()) {
- if (empty_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_deconstruct_frame_return_block = block->rpo_number();
- empty_deconstruct_frame_return_size = return_size;
- } else if (empty_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_deconstruct_frame_return_block;
- block->clear_must_deconstruct_frame();
- }
- } else {
- if (empty_no_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_no_deconstruct_frame_return_block =
- block->rpo_number();
- empty_no_deconstruct_frame_return_size = return_size;
- } else if (empty_no_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_no_deconstruct_frame_return_block;
- }
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+ // can't skip instructions with non redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not in the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else if (instr->IsRet()) {
+ TRACE(" ret\n");
+ if (fallthru) {
+ CHECK_IMPLIES(block->must_construct_frame(),
+ block->must_deconstruct_frame());
+ // Only handle returns with immediate/constant operands, since
+ // they must always be the same for all returns in a function.
+ // Dynamic return values might use different registers at
+ // different return sites and therefore cannot be shared.
+ if (instr->InputAt(0)->IsImmediate()) {
+ int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
+ ->inline_int32_value();
+ // Instructions can be shared only for blocks that share
+ // the same |must_deconstruct_frame| attribute.
+ if (block->must_deconstruct_frame()) {
+ if (empty_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_deconstruct_frame_return_block = block->rpo_number();
+ empty_deconstruct_frame_return_size = return_size;
+ } else if (empty_deconstruct_frame_return_size == return_size) {
+ fw = empty_deconstruct_frame_return_block;
+ block->clear_must_deconstruct_frame();
+ }
+ } else {
+ if (empty_no_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_no_deconstruct_frame_return_block = block->rpo_number();
+ empty_no_deconstruct_frame_return_size = return_size;
+ } else if (empty_no_deconstruct_frame_return_size ==
+ return_size) {
+ fw = empty_no_deconstruct_frame_return_block;
}
}
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount())
- fw = RpoNumber::FromInt(next);
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
+ if (mode == kFlags_branch) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {
diff --git a/chromium/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/chromium/v8/src/compiler/backend/loong64/code-generator-loong64.cc
new file mode 100644
index 00000000000..33226126cd6
--- /dev/null
+++ b/chromium/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -0,0 +1,2636 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(LOONG_dev): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds Loong64-specific methods to convert InstructionOperands.
+class Loong64OperandConverter final : public InstructionOperandConverter {
+ public:
+ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+ // Single (Float) and Double register namespace is same on LOONG64,
+ // both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64?
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
+ stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {
+ }
+
+ void Generate() final {
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode,
+ StubCallMode::kCallWasmRuntimeStub);
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Operand const offset_;
+ Register const value_;
+ RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
+ StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kNotEqual:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dbar(0); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// only use for sub_w and sub_d
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 2, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 1, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ } while (0)
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(scratch));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Ld_w(scratch, FieldMemOperand(
+ scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, scratch, Operand(zero_reg));
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Check the function's context matches the context argument.
+ __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, scratch);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // from start_call to return address.
+ int offset = __ root_array_available() ? 36 : 80; // 9 or 20 instrs
+#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ offset += 12; // see CallCFunction
+ }
+#endif
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ __ bind(&start_call);
+ __ pcaddi(t7, -4);
+ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ RecordSafepoint(instr->reference_map());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+ // ARM), there may be more strict alignment requirement, causing old SP
+ // to be saved on the stack. In those cases, we can not calculate the SP
+ // delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSADcheck:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Sub_d(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
+ break;
+ }
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ Register object = i.InputRegister(0);
+ Operand offset(zero_reg);
+ if (addressing_mode == kMode_MRI) {
+ offset = Operand(i.InputInt64(1));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ offset = Operand(i.InputRegister(1));
+ }
+ Register value = i.InputRegister(2);
+
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode());
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ if (addressing_mode == kMode_MRI) {
+ __ St_d(value, MemOperand(object, i.InputInt64(1)));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ __ St_d(value, MemOperand(object, i.InputRegister(1)));
+ }
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ DCHECK_EQ(addressing_mode, kMode_MRI);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Add_d(scratch, object, Operand(i.InputInt64(1)));
+ __ amswap_db_d(zero_reg, value, scratch);
+ }
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ if (FLAG_debug_code) {
+ // Verify that the output_register is properly aligned
+ __ And(scratch, i.OutputRegister(), Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, scratch,
+ Operand(zero_reg));
+ }
+ break;
+ }
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kLoong64Add_w:
+ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Add_d:
+ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64AddOvf_d:
+ __ AddOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Sub_w:
+ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Sub_d:
+ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64SubOvf_d:
+ __ SubOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mul_w:
+ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64MulOvf_w:
+ __ MulOverflow_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mulh_w:
+ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_wu:
+ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_d:
+ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_w:
+ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_wu:
+ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_w:
+ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_wu:
+ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mul_d:
+ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_d:
+ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_du:
+ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_d:
+ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_du:
+ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Alsl_d:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64Alsl_w:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64And:
+ case kLoong64And32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Or:
+ case kLoong64Or32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Nor:
+ case kLoong64Nor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Clz_w:
+ __ clz_w(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Clz_d:
+ __ clz_d(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Sll_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Bstrpick_w:
+ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ case kLoong64Bstrins_w:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_w(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Bstrpick_d: {
+ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ }
+ case kLoong64Bstrins_d:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_d(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Sll_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kLoong64Rotr_w:
+ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Rotr_d:
+ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Tst:
+ __ And(t8, i.InputRegister(0), i.InputOperand(1));
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Cmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Mov:
+ // TODO(LOONG_dev): Should we combine mov/li, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kLoong64Float32Cmp: {
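+      // The comparison below only sets the FPU condition flag FCC0; the
+      // branch or boolean materialization that consumes FCC0 is emitted
+      // later by AssembleArchBranch / AssembleArchBoolean.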
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(left, right, cc);
+ } break;
+ case kLoong64Float32Add:
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Sub:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Mul:
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Div:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Abs:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Neg:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Sqrt: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32Min: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float32Max: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Cmp: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF64(left, right, cc);
+ } break;
+ case kLoong64Float64Add:
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Sub:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mul:
+ // TODO(LOONG_dev): LOONG64 add special case: right op is -1.0, see arm
+ // port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Div:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mod: {
+ // TODO(turbofan): implement directly.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ break;
+ }
+ case kLoong64Float64Abs:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Neg:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Sqrt: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float64Min: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Max: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64RoundDown: {
+ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundDown: {
+ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTruncate: {
+ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTruncate: {
+ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundUp: {
+ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundUp: {
+ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTiesEven: {
+ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTiesEven: {
+ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ToFloat32:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float32ToFloat64:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Int32ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int32ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat32: {
+ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Int64ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int64ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat64: {
+ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat64: {
+ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat32: {
+ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Float64ToInt32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch);
+ break;
+ }
+ case kLoong64Float32ToInt32: {
+ FPURegister scratch_d = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch_d);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_w(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToInt64: {
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(), scratch_d);
+ if (load_status) {
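+      // The optional second output is a success flag: it is 1 when no
+      // overflow or invalid-operation cause bit is set in the FCSR, and 0
+      // when the conversion overflowed or the input was NaN.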
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ break;
+ }
+ case kLoong64Float64ToInt64: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_d(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(0), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ if (set_overflow_to_min_i64) {
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ __ addi_d(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_d(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float64ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ break;
+ }
+ case kLoong64Float32ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64Float64ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64BitcastDL:
+ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64BitcastLD:
+ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ExtractHighWord32:
+ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kLoong64Float64InsertHighWord32:
+ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kLoong64Ext_w_b:
+ __ ext_w_b(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ext_w_h:
+ __ ext_w_h(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ld_bu:
+ __ Ld_bu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_b:
+ __ Ld_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_b:
+ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_hu:
+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_h:
+ __ Ld_h(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_h:
+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_w:
+ __ Ld_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_wu:
+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_d:
+ __ Ld_d(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_w:
+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64St_d:
+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Fld_s: {
+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kLoong64Fst_s: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_s(ft, operand);
+ break;
+ }
+ case kLoong64Fld_d:
+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Fst_d: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_d(ft, i.MemoryOperand());
+ break;
+ }
+ case kLoong64Dbar: {
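+      // dbar with hint 0 acts as a full memory barrier.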
+ __ dbar(0);
+ break;
+ }
+ case kLoong64Push:
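+    // FP values are pushed by storing below sp and then adjusting sp; GP
+    // values go through the Push pseudo-instruction.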
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub_d(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kLoong64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ __ Fld_s(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
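+          // Simd128 stack reads are not supported by this port yet.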
+ abort();
+ }
+ } else {
+ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kLoong64StackClaim: {
+ __ Sub_d(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kLoong64Poke: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kLoong64ByteSwap64: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kLoong64ByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu);
+ break;
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w);
+ break;
+ case kLoong64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu);
+ break;
+ case kLoong64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
+ break;
+ case kLoong64StoreCompressTagged:
+ case kLoong64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
+ break;
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
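+        // Compute the absolute address into the temp register, then perform
+        // the exchange with the amswap variant that includes a memory
+        // barrier ("_db").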
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicExchangeUint64:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32,
+ 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
+ break;
+ case kAtomicAddWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Add_d, 64);
+ break;
+ }
+ break;
+ case kAtomicSubWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Sub_d, 64);
+ break;
+ }
+ break;
+ case kAtomicAndWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, And, 64);
+ break;
+ }
+ break;
+ case kAtomicOrWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Or, 64);
+ break;
+ }
+ break;
+ case kAtomicXorWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Xor, 64);
+ break;
+ }
+ break;
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst64, 64); \
+ break; \
+ } \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add_w, Add_d)
+ ATOMIC_BINOP_CASE(Sub, Sub_w, Sub_d)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+
+ case kLoong64Word64AtomicAddUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
+ break;
+ case kLoong64Word64AtomicAndUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicOrUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicXorUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+#undef ATOMIC_BINOP_CASE
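+    // The SIMD opcodes below are not implemented yet in this port; they fall
+    // through to the default case and emit no code.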
+ case kLoong64S128Const:
+ case kLoong64S128Zero:
+ case kLoong64I32x4Splat:
+ case kLoong64I32x4ExtractLane:
+ case kLoong64I32x4Add:
+ case kLoong64I32x4ReplaceLane:
+ case kLoong64I32x4Sub:
+ case kLoong64F64x2Abs:
+ default:
+ break;
+ }
+ return kSuccess;
+}
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
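+  // Redirect the `__` macro to the TurboAssembler passed to this free
+  // function; the original definition is restored at the end.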
+#undef __
+#define __ tasm->
+ Loong64OperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+  // LOONG64 does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit loong64 pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential that
+  // the input registers to the compare pseudo-op are not modified before this
+  // branch op, as they are tested here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, t8, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
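+    // The 32-bit operation was performed as a 64-bit add/sub; it overflowed
+    // exactly when bits 63..32 of the result differ from the sign bit of the
+    // low 32 bits, which is what the two shifts below compare.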
+ __ srai_d(scratch, i.OutputRegister(), 32);
+ __ srai_w(scratch2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, scratch2, Operand(scratch));
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ switch (condition) {
+ // Overflow occurs if overflow register is negative
+ case kOverflow:
+ __ Branch(tlabel, lt, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kLoong64MulOvf_w, condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
+ }
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Float32Cmp ||
+ instr->arch_opcode() == kLoong64Float64Cmp) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ Loong64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Loong64OperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+  // Loong64 does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit loong64 pseudo-instructions, which are checked and
+  // handled here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, t8, 1);
+ } else {
+ __ Sltu(result, zero_reg, t8);
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
+    // The overflow check below compares the sign of the full 64-bit result
+    // with the sign of the low 32 bits and leaves 1 in {result} on overflow,
+    // 0 otherwise.
+ __ srli_d(scratch, i.OutputRegister(), 63);
+ __ srli_w(result, i.OutputRegister(), 31);
+ __ xor_(result, scratch, result);
+ if (cc == eq) // Toggle result for not overflow.
+ __ xori(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ // Overflow occurs if overflow register is negative
+ __ slt(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
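+        // eq/ne are reduced to a test against zero: subtract a small
+        // immediate (added as its negation) or XOR with the right-hand side,
+        // then use Sltu to produce the 0/1 result.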
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ if (instr->InputAt(1)->IsImmediate()) {
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add_d(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, Operand(right));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Float64Cmp ||
+ instr->arch_opcode() == kLoong64Float32Cmp) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ {
+ __ movcf2gr(result, FCC0);
+ if (!predicate) {
+ __ xori(result, result, 1);
+ }
+ }
+ return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
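+  // Input 0 is the switch index, input 1 the default block, and inputs 2..
+  // are the case targets. Out-of-range indices branch to the default block;
+  // in-range indices dispatch through an inline jump table.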
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+#else
+      // For balance: opens a block matching the one opened in the
+      // V8_ENABLE_WEBASSEMBLY branch above, so the `} else {` below compiles
+      // in both configurations.
+      if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld_d(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld_d(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Add_d(scratch, scratch,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(scratch));
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ Loong64OperandConverter g(this, nullptr);
+
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_slots} ==
+  // 0. See RawMachineAssembler::PopAndReturn.
+ if (parameter_slots != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+  // If {parameter_slots} == 0, this is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of popping the JS
+  // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_slots != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Add_d(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_slots > 1) {
+ __ li(t1, parameter_slots);
+ __ slt(t2, t0, t1);
+ __ Movn(t0, t1, t2);
+ }
+ __ slli_d(t0, t0, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_slots + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_slots);
+ __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ St_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld_d(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, src);
+ __ St_d(scratch, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
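+      // Materialize the constant into a register (the destination register
+      // or a scratch register); for a stack-slot destination the value is
+      // stored from the scratch register afterwards.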
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : scratch;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
+ __ li(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers on LOONG64.
+ }
+ if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ St_d(zero_reg, dst);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ li(scratch, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ St_d(scratch, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ Move(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ Fst_d(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Fst_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsFPRegister()) {
+ __ Fld_d(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Fld_d(temp, src);
+ __ Fst_d(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Register-register.
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(scratch, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(scratch, src);
+ __ Ld_d(src, dst);
+ __ St_d(scratch, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+    // TODO(LOONG_dev): LOONG64: optimize scratch register usage. Since the
+    // Ld instruction may need a scratch register, we should not use both
+    // scratch registers from UseScratchRegisterScope here.
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Ld_d(scratch, src);
+ __ Fld_d(scratch_d, dst);
+ __ St_d(scratch, dst);
+ __ Fst_d(scratch_d, src);
+ } else if (source->IsFPRegister()) {
+ FPURegister scratch_d = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(scratch_d, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch_d);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(scratch_d, src);
+ __ Fld_d(src, dst);
+ __ Fst_d(scratch_d, dst);
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.base(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize);
+ FPURegister scratch_d = kScratchDoubleReg;
+    __ Fld_d(scratch_d, dst0);  // Save the destination double in scratch_d.
+ __ Ld_w(scratch, src0); // Then use scratch to copy source to destination.
+ __ St_w(scratch, dst0);
+ __ Ld_w(scratch, src1);
+ __ St_w(scratch, dst1);
+ __ Fst_d(scratch_d, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit LOONG64 we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/chromium/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
new file mode 100644
index 00000000000..e38d37451d1
--- /dev/null
+++ b/chromium/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -0,0 +1,402 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// LOONG64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
+ V(Loong64Word64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
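+
+// The shared instruction-codes.h header consumes this list (together with the
+// generic ADDRESSING_MODE_LIST) via the usual X-macro pattern to build the
+// AddressingMode enum, roughly (a sketch of that pattern, not part of this
+// header):
+//
+//   #define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+//   TARGET_ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+//   #undef DECLARE_ADDRESSING_MODE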
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
diff --git a/chromium/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/chromium/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
new file mode 100644
index 00000000000..3cfec9c4037
--- /dev/null
+++ b/chromium/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(LOONG_dev): LOONG64: Support the instruction scheduler.
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNREACHABLE();
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNREACHABLE();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/chromium/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
new file mode 100644
index 00000000000..29f9b111db0
--- /dev/null
+++ b/chromium/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -0,0 +1,3108 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds loong64-specific methods for generating InstructionOperands.
+class Loong64OperandGenerator final : public OperandGenerator {
+ public:
+ explicit Loong64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
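+    // Accepted immediate ranges: unsigned 5-bit for 32-bit shifts, unsigned
+    // 6-bit for 64-bit shifts, unsigned 12-bit for logical operations,
+    // signed 16-bit for load/store offsets, and signed 12-bit otherwise.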
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kLoong64Sll_w:
+ case kLoong64Srl_w:
+ case kLoong64Sra_w:
+ return is_uint5(value);
+ case kLoong64Sll_d:
+ case kLoong64Srl_d:
+ case kLoong64Sra_d:
+ return is_uint6(value);
+ case kLoong64And:
+ case kLoong64And32:
+ case kLoong64Or:
+ case kLoong64Or32:
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ case kLoong64Tst:
+ return is_uint12(value);
+ case kLoong64Ld_b:
+ case kLoong64Ld_bu:
+ case kLoong64St_b:
+ case kLoong64Ld_h:
+ case kLoong64Ld_hu:
+ case kLoong64St_h:
+ case kLoong64Ld_w:
+ case kLoong64Ld_wu:
+ case kLoong64St_w:
+ case kLoong64Ld_d:
+ case kLoong64St_d:
+ case kLoong64Fld_s:
+ case kLoong64Fst_s:
+ case kLoong64Fld_d:
+ case kLoong64Fst_d:
+ return is_int16(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
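+    // On this little-endian target that means loading the 32-bit word at
+    // offset + 4, i.e. the upper half of the original 64-bit value.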
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ Loong64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kLoong64Ld_w;
+ if (g.CanBeImmediate(offset, opcode_)) {
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+ matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ Loong64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ Loong64OperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
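+  // Prefer a matching immediate on the right; when a reverse opcode is
+  // supplied, an immediate on the left is handled by swapping the operands
+  // and emitting the reverse opcode.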
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
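+  // If the base is an external reference that can be addressed relative to
+  // the roots register and the index is a constant, fold the whole address
+  // into a single root-relative immediate access.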
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode,
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ // TODO(LOONG_dev): LOONG64 S128 LoadSplat
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kLoong64S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kLoong64S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kLoong64S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kLoong64S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kLoong64S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kLoong64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kLoong64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kLoong64S128Load64Zero;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fld_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fld_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64Ld_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ // TODO(loong64): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so
+    // the index must also fit the arithmetic (Add_d) immediate range, not
+    // just the load/store immediate range.
+ if (g.CanBeImmediate(index, kLoong64Add_d)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MRR;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fst_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fst_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kLoong64St_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kLoong64St_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64St_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64St_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index),
+ g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Bstrpick_w for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
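+      // For example, Word32And(Word32Shr(x, 8), 0xFFFF) extracts the 16 bits
+      // of x starting at bit 8 with a single Bstrpick_w.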
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
+
+          // Bstrpick_w cannot extract bits past the register size. However,
+          // since the shift has already introduced zeros in the high bits, we
+          // can still use Bstrpick_w with a smaller width; the remaining bits
+          // will be zero.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove the constant loading of the inverted mask.
+ Emit(kLoong64Bstrins_w, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Bstrpick_d for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+        // Bstrpick_d cannot extract bits past the register size. However,
+        // since the shift has already introduced zeros in the high bits, we
+        // can still use Bstrpick_d with a smaller width; the remaining bits
+        // will be zero.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ }
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove the constant loading of the inverted mask. Only shifts
+      // smaller than 32 are matched here.
+ Emit(kLoong64Bstrins_d, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And, true, kLoong64And);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Sll_w where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_w, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_w for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_w, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
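+  // Fold (x << K) >> K (arithmetic) into a sign-extension: Ext_w_h for
+  // K == 16, Ext_w_b for K == 24, and a plain 32-bit move (Sll_w by zero)
+  // for K == 32.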
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sra_w, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Sll_d where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_d, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_d for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_d, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kLoong64Sra_d, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_w, node);
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_d, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_w, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_d, node);
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Alsl_w for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_w for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ // Select Alsl_d for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_d for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_w);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_d);
+}
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
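+  // Strength-reduce multiplication by a constant: a power of two becomes a
+  // shift, 2^k + 1 becomes shift-and-add (Alsl_w), and 2^k - 1 becomes a
+  // shift followed by a subtract.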
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Mulh_d.
+ Emit(kLoong64Mulh_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kLoong64Mul_w, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_w, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_wu, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+      // The Alsl_d macro handles shift amounts that are out of range.
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kLoong64Mul_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Div_d.
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Div_wu, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Mod_d.
+ Emit(kLoong64Mod_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Mod_w, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Mod_wu, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_du, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_du, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Float32ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToInt32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToUint32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+  // TODO(LOONG_dev): LOONG64: Match ChangeFloat64ToInt32(Float64Round##OP) to
+  // the corresponding instruction, which does both the rounding and the
+  // conversion to integer format.
+ if (CanCover(node, value)) {
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (!CanCover(value, next)) {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kLoong64Float32ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt64, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float64ToInt64;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // On LoongArch64, int32 values are already sign-extended to 64 bits, so
+  // normally no explicit sign-extension is needed here.
+  // However, when calling a host function from the simulator, an int32 return
+  // value is not sign-extended to int64 by the simulator, because the
+  // simulator cannot tell whether the function returns an int32 or an int64.
+#ifdef USE_SIMULATOR
+ Node* value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kCall) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
+ g.TempImmediate(0));
+ return;
+ }
+#endif
+ EmitIdentity(node);
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
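+        // A logical right shift by at least one bit clears bit 31, so the
+        // sign-extended 32-bit result already has a zero upper word.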
+ return sa > 0;
+ }
+ return false;
+ }
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8: // Fall through.
+ case MachineRepresentation::kWord16:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+
+ if (value->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, kLoong64Ld_wu, node);
+ return;
+ }
+ }
+ if (ZeroExtendsWord32ToWord64(value)) {
+ EmitIdentity(node);
+ return;
+ }
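+  // Otherwise materialize the zero-extension explicitly by extracting the
+  // low 32 bits.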
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0),
+ g.TempImmediate(32));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+            // After Smi untagging, no truncation is needed; emit the shift
+            // directly.
+ Emit(kLoong64Sra_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
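+  // A 32-bit shift by zero truncates to 32 bits and sign-extends the result,
+  // which is the canonical representation of int32 values on this target.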
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kLoong64Int32ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kLoong64Float64ToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat64, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64BitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64BitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kLoong64Float32Add, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kLoong64Float64Add, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kLoong64Float32Sub, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kLoong64Float64Sub, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kLoong64Float32Mul, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kLoong64Float64Mul, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kLoong64Float32Div, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kLoong64Float64Div, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
+ g.UseFixed(node->InputAt(0), f0), g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kLoong64Float32Abs, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kLoong64Float64Abs, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float32Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float64Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kLoong64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kLoong64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0),
+ g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ if (push_count > 0) {
+ // Calculate the needed stack space.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kLoong64StackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ } else if (output.location.GetType() == MachineType::Simd128()) {
+ abort();
+ }
+ int offset = call_descriptor->GetOffsetToReturns();
+ int reverse_slot = -output.location.GetLocation() - offset;
+ Emit(kLoong64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float64Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Loong64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kLoong64Tst) {
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ VisitCompare(selector, opcode, g.UseRegister(left->InputAt(0)),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ }
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kLoong64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ // TODO(LOONG_dev): LOONG64 Add check for debug mode
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+#ifdef USE_SIMULATOR
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
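+ // Shift both operands up by 32 bits so that only the low 32 bits of each
+ // input take part in the 64-bit comparison.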
+ selector->Emit(kLoong64Sll_d, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kLoong64Sll_d, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+#endif
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ // LOONG64 doesn't support Word32 compare instructions. Instead, it relies
+ // on the values in registers being correctly sign-extended and uses a
+ // Word64 comparison instead.
+#ifdef USE_SIMULATOR
+ // When calling a host function in the simulator, if the function returns
+ // an int32 value, the simulator does not sign-extend it to int64, because
+ // the simulator cannot know whether the function returns an int32 or an
+ // int64. So we need to do a full word32 compare in this case.
+ if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
+ return;
+ }
+#endif
+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
+ : kLoong64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
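+ // Use base + immediate-index addressing when the index fits the opcode's
+ // immediate field; otherwise compute the address into a temporary first.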
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
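+ // Tagged stores that need a write barrier go through the combined
+ // atomic-store-with-write-barrier opcode; otherwise a representation-
+ // specific atomic store opcode is selected below.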
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(kTaggedSize, 8);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kLoong64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ Loong64OperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // TempRegister(0) is used to store the comparison result.
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation>) is either nullptr, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Add_d, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Sub_d, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64MulOvf_w, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64AddOvf_d, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64SubOvf_d, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kLoong64Tst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+ // The continuation could not be combined with a compare; emit a compare
+ // against 0.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
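+ // Prefer a jump table only when its weighted space/time cost does not
+ // exceed that of a binary-search switch and the value range is small
+ // enough.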
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kLoong64Sub_w, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Add_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Add_d, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Sub_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Sub_d, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kLoong64Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Dbar, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kLoong64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F64x2) \
+ V(F32x4) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kLoong64F64x2Abs) \
+ V(F64x2Neg, kLoong64F64x2Neg) \
+ V(F64x2Sqrt, kLoong64F64x2Sqrt) \
+ V(F64x2Ceil, kLoong64F64x2Ceil) \
+ V(F64x2Floor, kLoong64F64x2Floor) \
+ V(F64x2Trunc, kLoong64F64x2Trunc) \
+ V(F64x2NearestInt, kLoong64F64x2NearestInt) \
+ V(I64x2Neg, kLoong64I64x2Neg) \
+ V(I64x2BitMask, kLoong64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \
+ V(F32x4Abs, kLoong64F32x4Abs) \
+ V(F32x4Neg, kLoong64F32x4Neg) \
+ V(F32x4Sqrt, kLoong64F32x4Sqrt) \
+ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kLoong64F32x4Ceil) \
+ V(F32x4Floor, kLoong64F32x4Floor) \
+ V(F32x4Trunc, kLoong64F32x4Trunc) \
+ V(F32x4NearestInt, kLoong64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kLoong64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \
+ V(I32x4Neg, kLoong64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kLoong64I32x4Abs) \
+ V(I32x4BitMask, kLoong64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kLoong64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kLoong64I16x8Abs) \
+ V(I16x8BitMask, kLoong64I16x8BitMask) \
+ V(I8x16Neg, kLoong64I8x16Neg) \
+ V(I8x16Abs, kLoong64I8x16Abs) \
+ V(I8x16Popcnt, kLoong64I8x16Popcnt) \
+ V(I8x16BitMask, kLoong64I8x16BitMask) \
+ V(S128Not, kLoong64S128Not) \
+ V(I64x2AllTrue, kLoong64I64x2AllTrue) \
+ V(I32x4AllTrue, kLoong64I32x4AllTrue) \
+ V(I16x8AllTrue, kLoong64I16x8AllTrue) \
+ V(I8x16AllTrue, kLoong64I8x16AllTrue) \
+ V(V128AnyTrue, kLoong64V128AnyTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kLoong64F64x2Add) \
+ V(F64x2Sub, kLoong64F64x2Sub) \
+ V(F64x2Mul, kLoong64F64x2Mul) \
+ V(F64x2Div, kLoong64F64x2Div) \
+ V(F64x2Min, kLoong64F64x2Min) \
+ V(F64x2Max, kLoong64F64x2Max) \
+ V(F64x2Eq, kLoong64F64x2Eq) \
+ V(F64x2Ne, kLoong64F64x2Ne) \
+ V(F64x2Lt, kLoong64F64x2Lt) \
+ V(F64x2Le, kLoong64F64x2Le) \
+ V(I64x2Eq, kLoong64I64x2Eq) \
+ V(I64x2Ne, kLoong64I64x2Ne) \
+ V(I64x2Add, kLoong64I64x2Add) \
+ V(I64x2Sub, kLoong64I64x2Sub) \
+ V(I64x2Mul, kLoong64I64x2Mul) \
+ V(I64x2GtS, kLoong64I64x2GtS) \
+ V(I64x2GeS, kLoong64I64x2GeS) \
+ V(F32x4Add, kLoong64F32x4Add) \
+ V(F32x4Sub, kLoong64F32x4Sub) \
+ V(F32x4Mul, kLoong64F32x4Mul) \
+ V(F32x4Div, kLoong64F32x4Div) \
+ V(F32x4Max, kLoong64F32x4Max) \
+ V(F32x4Min, kLoong64F32x4Min) \
+ V(F32x4Eq, kLoong64F32x4Eq) \
+ V(F32x4Ne, kLoong64F32x4Ne) \
+ V(F32x4Lt, kLoong64F32x4Lt) \
+ V(F32x4Le, kLoong64F32x4Le) \
+ V(I32x4Add, kLoong64I32x4Add) \
+ V(I32x4Sub, kLoong64I32x4Sub) \
+ V(I32x4Mul, kLoong64I32x4Mul) \
+ V(I32x4MaxS, kLoong64I32x4MaxS) \
+ V(I32x4MinS, kLoong64I32x4MinS) \
+ V(I32x4MaxU, kLoong64I32x4MaxU) \
+ V(I32x4MinU, kLoong64I32x4MinU) \
+ V(I32x4Eq, kLoong64I32x4Eq) \
+ V(I32x4Ne, kLoong64I32x4Ne) \
+ V(I32x4GtS, kLoong64I32x4GtS) \
+ V(I32x4GeS, kLoong64I32x4GeS) \
+ V(I32x4GtU, kLoong64I32x4GtU) \
+ V(I32x4GeU, kLoong64I32x4GeU) \
+ V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S) \
+ V(I16x8Add, kLoong64I16x8Add) \
+ V(I16x8AddSatS, kLoong64I16x8AddSatS) \
+ V(I16x8AddSatU, kLoong64I16x8AddSatU) \
+ V(I16x8Sub, kLoong64I16x8Sub) \
+ V(I16x8SubSatS, kLoong64I16x8SubSatS) \
+ V(I16x8SubSatU, kLoong64I16x8SubSatU) \
+ V(I16x8Mul, kLoong64I16x8Mul) \
+ V(I16x8MaxS, kLoong64I16x8MaxS) \
+ V(I16x8MinS, kLoong64I16x8MinS) \
+ V(I16x8MaxU, kLoong64I16x8MaxU) \
+ V(I16x8MinU, kLoong64I16x8MinU) \
+ V(I16x8Eq, kLoong64I16x8Eq) \
+ V(I16x8Ne, kLoong64I16x8Ne) \
+ V(I16x8GtS, kLoong64I16x8GtS) \
+ V(I16x8GeS, kLoong64I16x8GeS) \
+ V(I16x8GtU, kLoong64I16x8GtU) \
+ V(I16x8GeU, kLoong64I16x8GeU) \
+ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
+ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \
+ V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS) \
+ V(I8x16Add, kLoong64I8x16Add) \
+ V(I8x16AddSatS, kLoong64I8x16AddSatS) \
+ V(I8x16AddSatU, kLoong64I8x16AddSatU) \
+ V(I8x16Sub, kLoong64I8x16Sub) \
+ V(I8x16SubSatS, kLoong64I8x16SubSatS) \
+ V(I8x16SubSatU, kLoong64I8x16SubSatU) \
+ V(I8x16MaxS, kLoong64I8x16MaxS) \
+ V(I8x16MinS, kLoong64I8x16MinS) \
+ V(I8x16MaxU, kLoong64I8x16MaxU) \
+ V(I8x16MinU, kLoong64I8x16MinU) \
+ V(I8x16Eq, kLoong64I8x16Eq) \
+ V(I8x16Ne, kLoong64I8x16Ne) \
+ V(I8x16GtS, kLoong64I8x16GtS) \
+ V(I8x16GeS, kLoong64I8x16GeS) \
+ V(I8x16GtU, kLoong64I8x16GtU) \
+ V(I8x16GeU, kLoong64I8x16GeU) \
+ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \
+ V(S128And, kLoong64S128And) \
+ V(S128Or, kLoong64S128Or) \
+ V(S128Xor, kLoong64S128Xor) \
+ V(S128AndNot, kLoong64S128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ Loong64OperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+ // If all bytes are zeros or ones, avoid emitting code for generic constants
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kLoong64S128Zero, dst);
+ } else if (all_ones) {
+ Emit(kLoong64S128AllOnes, dst);
+ } else {
+ Emit(kLoong64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64S128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kLoong64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kLoong64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kLoong64S128Select, node);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kLoong64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kLoong64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kLoong64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kLoong64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kLoong64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kLoong64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kLoong64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kLoong64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kLoong64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kLoong64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kLoong64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kLoong64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kLoong64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kLoong64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kLoong64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kLoong64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kLoong64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kLoong64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kLoong64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ Loong64OperandGenerator g(this);
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kLoong64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ // We don't want input 0 or input 1 to be the same as output, since we will
+ // modify the output before doing the calculation.
+ Emit(kLoong64I8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
+}
+
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {} \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {}
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+#define VISIT_EXTADD_PAIRWISE(OPCODE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Loong64OperandGenerator g(this); \
+ Emit(kLoong64ExtAddPairwise, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
+#undef VISIT_EXTADD_PAIRWISE
+
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
index 2b8197e7e64..97c9e0978ed 100644
--- a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -93,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -313,16 +312,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -614,31 +603,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -845,13 +809,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -902,7 +866,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -914,7 +879,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
- __ sw(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ sw(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ sw(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -938,10 +910,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1541,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1574,11 +1536,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1658,7 +1618,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1823,74 +1782,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kWord32AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kWord32AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3675,7 +3634,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3688,7 +3646,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3727,85 +3684,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMipsCmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMipsTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMipsAddOvf:
- case kMipsSubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsMulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsCmpS:
- case kMipsCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4130,7 +4008,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4333,7 +4210,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
UNREACHABLE();
- break;
case Constant::kFloat64:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
@@ -4357,7 +4233,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
- break;
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 40f1ef3e98b..3f0d8f9d393 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -11,369 +11,374 @@ namespace compiler {
// MIPS-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(MipsAdd) \
- V(MipsAddOvf) \
- V(MipsSub) \
- V(MipsSubOvf) \
- V(MipsMul) \
- V(MipsMulOvf) \
- V(MipsMulHigh) \
- V(MipsMulHighU) \
- V(MipsDiv) \
- V(MipsDivU) \
- V(MipsMod) \
- V(MipsModU) \
- V(MipsAnd) \
- V(MipsOr) \
- V(MipsNor) \
- V(MipsXor) \
- V(MipsClz) \
- V(MipsCtz) \
- V(MipsPopcnt) \
- V(MipsLsa) \
- V(MipsShl) \
- V(MipsShr) \
- V(MipsSar) \
- V(MipsShlPair) \
- V(MipsShrPair) \
- V(MipsSarPair) \
- V(MipsExt) \
- V(MipsIns) \
- V(MipsRor) \
- V(MipsMov) \
- V(MipsTst) \
- V(MipsCmp) \
- V(MipsCmpS) \
- V(MipsAddS) \
- V(MipsSubS) \
- V(MipsMulS) \
- V(MipsDivS) \
- V(MipsAbsS) \
- V(MipsSqrtS) \
- V(MipsMaxS) \
- V(MipsMinS) \
- V(MipsCmpD) \
- V(MipsAddD) \
- V(MipsSubD) \
- V(MipsMulD) \
- V(MipsDivD) \
- V(MipsModD) \
- V(MipsAbsD) \
- V(MipsSqrtD) \
- V(MipsMaxD) \
- V(MipsMinD) \
- V(MipsNegS) \
- V(MipsNegD) \
- V(MipsAddPair) \
- V(MipsSubPair) \
- V(MipsMulPair) \
- V(MipsMaddS) \
- V(MipsMaddD) \
- V(MipsMsubS) \
- V(MipsMsubD) \
- V(MipsFloat32RoundDown) \
- V(MipsFloat32RoundTruncate) \
- V(MipsFloat32RoundUp) \
- V(MipsFloat32RoundTiesEven) \
- V(MipsFloat64RoundDown) \
- V(MipsFloat64RoundTruncate) \
- V(MipsFloat64RoundUp) \
- V(MipsFloat64RoundTiesEven) \
- V(MipsCvtSD) \
- V(MipsCvtDS) \
- V(MipsTruncWD) \
- V(MipsRoundWD) \
- V(MipsFloorWD) \
- V(MipsCeilWD) \
- V(MipsTruncWS) \
- V(MipsRoundWS) \
- V(MipsFloorWS) \
- V(MipsCeilWS) \
- V(MipsTruncUwD) \
- V(MipsTruncUwS) \
- V(MipsCvtDW) \
- V(MipsCvtDUw) \
- V(MipsCvtSW) \
- V(MipsCvtSUw) \
- V(MipsLb) \
- V(MipsLbu) \
- V(MipsSb) \
- V(MipsLh) \
- V(MipsUlh) \
- V(MipsLhu) \
- V(MipsUlhu) \
- V(MipsSh) \
- V(MipsUsh) \
- V(MipsLw) \
- V(MipsUlw) \
- V(MipsSw) \
- V(MipsUsw) \
- V(MipsLwc1) \
- V(MipsUlwc1) \
- V(MipsSwc1) \
- V(MipsUswc1) \
- V(MipsLdc1) \
- V(MipsUldc1) \
- V(MipsSdc1) \
- V(MipsUsdc1) \
- V(MipsFloat64ExtractLowWord32) \
- V(MipsFloat64ExtractHighWord32) \
- V(MipsFloat64InsertLowWord32) \
- V(MipsFloat64InsertHighWord32) \
- V(MipsFloat64SilenceNaN) \
- V(MipsFloat32Max) \
- V(MipsFloat64Max) \
- V(MipsFloat32Min) \
- V(MipsFloat64Min) \
- V(MipsPush) \
- V(MipsPeek) \
- V(MipsStoreToStackSlot) \
- V(MipsByteSwap32) \
- V(MipsStackClaim) \
- V(MipsSeb) \
- V(MipsSeh) \
- V(MipsSync) \
- V(MipsS128Zero) \
- V(MipsI32x4Splat) \
- V(MipsI32x4ExtractLane) \
- V(MipsI32x4ReplaceLane) \
- V(MipsI32x4Add) \
- V(MipsI32x4Sub) \
- V(MipsF64x2Abs) \
- V(MipsF64x2Neg) \
- V(MipsF64x2Sqrt) \
- V(MipsF64x2Add) \
- V(MipsF64x2Sub) \
- V(MipsF64x2Mul) \
- V(MipsF64x2Div) \
- V(MipsF64x2Min) \
- V(MipsF64x2Max) \
- V(MipsF64x2Eq) \
- V(MipsF64x2Ne) \
- V(MipsF64x2Lt) \
- V(MipsF64x2Le) \
- V(MipsF64x2Pmin) \
- V(MipsF64x2Pmax) \
- V(MipsF64x2Ceil) \
- V(MipsF64x2Floor) \
- V(MipsF64x2Trunc) \
- V(MipsF64x2NearestInt) \
- V(MipsF64x2ConvertLowI32x4S) \
- V(MipsF64x2ConvertLowI32x4U) \
- V(MipsF64x2PromoteLowF32x4) \
- V(MipsI64x2Add) \
- V(MipsI64x2Sub) \
- V(MipsI64x2Mul) \
- V(MipsI64x2Neg) \
- V(MipsI64x2Shl) \
- V(MipsI64x2ShrS) \
- V(MipsI64x2ShrU) \
- V(MipsI64x2BitMask) \
- V(MipsI64x2Eq) \
- V(MipsI64x2Ne) \
- V(MipsI64x2GtS) \
- V(MipsI64x2GeS) \
- V(MipsI64x2Abs) \
- V(MipsI64x2SConvertI32x4Low) \
- V(MipsI64x2SConvertI32x4High) \
- V(MipsI64x2UConvertI32x4Low) \
- V(MipsI64x2UConvertI32x4High) \
- V(MipsI64x2ExtMulLowI32x4S) \
- V(MipsI64x2ExtMulHighI32x4S) \
- V(MipsI64x2ExtMulLowI32x4U) \
- V(MipsI64x2ExtMulHighI32x4U) \
- V(MipsF32x4Splat) \
- V(MipsF32x4ExtractLane) \
- V(MipsF32x4ReplaceLane) \
- V(MipsF32x4SConvertI32x4) \
- V(MipsF32x4UConvertI32x4) \
- V(MipsF32x4DemoteF64x2Zero) \
- V(MipsI32x4Mul) \
- V(MipsI32x4MaxS) \
- V(MipsI32x4MinS) \
- V(MipsI32x4Eq) \
- V(MipsI32x4Ne) \
- V(MipsI32x4Shl) \
- V(MipsI32x4ShrS) \
- V(MipsI32x4ShrU) \
- V(MipsI32x4MaxU) \
- V(MipsI32x4MinU) \
- V(MipsF64x2Splat) \
- V(MipsF64x2ExtractLane) \
- V(MipsF64x2ReplaceLane) \
- V(MipsF32x4Abs) \
- V(MipsF32x4Neg) \
- V(MipsF32x4Sqrt) \
- V(MipsF32x4RecipApprox) \
- V(MipsF32x4RecipSqrtApprox) \
- V(MipsF32x4Add) \
- V(MipsF32x4Sub) \
- V(MipsF32x4Mul) \
- V(MipsF32x4Div) \
- V(MipsF32x4Max) \
- V(MipsF32x4Min) \
- V(MipsF32x4Eq) \
- V(MipsF32x4Ne) \
- V(MipsF32x4Lt) \
- V(MipsF32x4Le) \
- V(MipsF32x4Pmin) \
- V(MipsF32x4Pmax) \
- V(MipsF32x4Ceil) \
- V(MipsF32x4Floor) \
- V(MipsF32x4Trunc) \
- V(MipsF32x4NearestInt) \
- V(MipsI32x4SConvertF32x4) \
- V(MipsI32x4UConvertF32x4) \
- V(MipsI32x4Neg) \
- V(MipsI32x4GtS) \
- V(MipsI32x4GeS) \
- V(MipsI32x4GtU) \
- V(MipsI32x4GeU) \
- V(MipsI32x4Abs) \
- V(MipsI32x4BitMask) \
- V(MipsI32x4DotI16x8S) \
- V(MipsI32x4ExtMulLowI16x8S) \
- V(MipsI32x4ExtMulHighI16x8S) \
- V(MipsI32x4ExtMulLowI16x8U) \
- V(MipsI32x4ExtMulHighI16x8U) \
- V(MipsI32x4TruncSatF64x2SZero) \
- V(MipsI32x4TruncSatF64x2UZero) \
- V(MipsI32x4ExtAddPairwiseI16x8S) \
- V(MipsI32x4ExtAddPairwiseI16x8U) \
- V(MipsI16x8Splat) \
- V(MipsI16x8ExtractLaneU) \
- V(MipsI16x8ExtractLaneS) \
- V(MipsI16x8ReplaceLane) \
- V(MipsI16x8Neg) \
- V(MipsI16x8Shl) \
- V(MipsI16x8ShrS) \
- V(MipsI16x8ShrU) \
- V(MipsI16x8Add) \
- V(MipsI16x8AddSatS) \
- V(MipsI16x8Sub) \
- V(MipsI16x8SubSatS) \
- V(MipsI16x8Mul) \
- V(MipsI16x8MaxS) \
- V(MipsI16x8MinS) \
- V(MipsI16x8Eq) \
- V(MipsI16x8Ne) \
- V(MipsI16x8GtS) \
- V(MipsI16x8GeS) \
- V(MipsI16x8AddSatU) \
- V(MipsI16x8SubSatU) \
- V(MipsI16x8MaxU) \
- V(MipsI16x8MinU) \
- V(MipsI16x8GtU) \
- V(MipsI16x8GeU) \
- V(MipsI16x8RoundingAverageU) \
- V(MipsI16x8Abs) \
- V(MipsI16x8BitMask) \
- V(MipsI16x8Q15MulRSatS) \
- V(MipsI16x8ExtMulLowI8x16S) \
- V(MipsI16x8ExtMulHighI8x16S) \
- V(MipsI16x8ExtMulLowI8x16U) \
- V(MipsI16x8ExtMulHighI8x16U) \
- V(MipsI16x8ExtAddPairwiseI8x16S) \
- V(MipsI16x8ExtAddPairwiseI8x16U) \
- V(MipsI8x16Splat) \
- V(MipsI8x16ExtractLaneU) \
- V(MipsI8x16ExtractLaneS) \
- V(MipsI8x16ReplaceLane) \
- V(MipsI8x16Neg) \
- V(MipsI8x16Shl) \
- V(MipsI8x16ShrS) \
- V(MipsI8x16Add) \
- V(MipsI8x16AddSatS) \
- V(MipsI8x16Sub) \
- V(MipsI8x16SubSatS) \
- V(MipsI8x16MaxS) \
- V(MipsI8x16MinS) \
- V(MipsI8x16Eq) \
- V(MipsI8x16Ne) \
- V(MipsI8x16GtS) \
- V(MipsI8x16GeS) \
- V(MipsI8x16ShrU) \
- V(MipsI8x16AddSatU) \
- V(MipsI8x16SubSatU) \
- V(MipsI8x16MaxU) \
- V(MipsI8x16MinU) \
- V(MipsI8x16GtU) \
- V(MipsI8x16GeU) \
- V(MipsI8x16RoundingAverageU) \
- V(MipsI8x16Abs) \
- V(MipsI8x16Popcnt) \
- V(MipsI8x16BitMask) \
- V(MipsS128And) \
- V(MipsS128Or) \
- V(MipsS128Xor) \
- V(MipsS128Not) \
- V(MipsS128Select) \
- V(MipsS128AndNot) \
- V(MipsI64x2AllTrue) \
- V(MipsI32x4AllTrue) \
- V(MipsI16x8AllTrue) \
- V(MipsI8x16AllTrue) \
- V(MipsV128AnyTrue) \
- V(MipsS32x4InterleaveRight) \
- V(MipsS32x4InterleaveLeft) \
- V(MipsS32x4PackEven) \
- V(MipsS32x4PackOdd) \
- V(MipsS32x4InterleaveEven) \
- V(MipsS32x4InterleaveOdd) \
- V(MipsS32x4Shuffle) \
- V(MipsS16x8InterleaveRight) \
- V(MipsS16x8InterleaveLeft) \
- V(MipsS16x8PackEven) \
- V(MipsS16x8PackOdd) \
- V(MipsS16x8InterleaveEven) \
- V(MipsS16x8InterleaveOdd) \
- V(MipsS16x4Reverse) \
- V(MipsS16x2Reverse) \
- V(MipsS8x16InterleaveRight) \
- V(MipsS8x16InterleaveLeft) \
- V(MipsS8x16PackEven) \
- V(MipsS8x16PackOdd) \
- V(MipsS8x16InterleaveEven) \
- V(MipsS8x16InterleaveOdd) \
- V(MipsI8x16Shuffle) \
- V(MipsI8x16Swizzle) \
- V(MipsS8x16Concat) \
- V(MipsS8x8Reverse) \
- V(MipsS8x4Reverse) \
- V(MipsS8x2Reverse) \
- V(MipsS128Load8Splat) \
- V(MipsS128Load16Splat) \
- V(MipsS128Load32Splat) \
- V(MipsS128Load64Splat) \
- V(MipsS128Load8x8S) \
- V(MipsS128Load8x8U) \
- V(MipsS128Load16x4S) \
- V(MipsS128Load16x4U) \
- V(MipsS128Load32x2S) \
- V(MipsS128Load32x2U) \
- V(MipsMsaLd) \
- V(MipsMsaSt) \
- V(MipsI32x4SConvertI16x8Low) \
- V(MipsI32x4SConvertI16x8High) \
- V(MipsI32x4UConvertI16x8Low) \
- V(MipsI32x4UConvertI16x8High) \
- V(MipsI16x8SConvertI8x16Low) \
- V(MipsI16x8SConvertI8x16High) \
- V(MipsI16x8SConvertI32x4) \
- V(MipsI16x8UConvertI32x4) \
- V(MipsI16x8UConvertI8x16Low) \
- V(MipsI16x8UConvertI8x16High) \
- V(MipsI8x16SConvertI16x8) \
- V(MipsI8x16UConvertI16x8) \
- V(MipsWord32AtomicPairLoad) \
- V(MipsWord32AtomicPairStore) \
- V(MipsWord32AtomicPairAdd) \
- V(MipsWord32AtomicPairSub) \
- V(MipsWord32AtomicPairAnd) \
- V(MipsWord32AtomicPairOr) \
- V(MipsWord32AtomicPairXor) \
- V(MipsWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(MipsAdd) \
+ V(MipsAddOvf) \
+ V(MipsSub) \
+ V(MipsSubOvf) \
+ V(MipsMul) \
+ V(MipsMulOvf) \
+ V(MipsMulHigh) \
+ V(MipsMulHighU) \
+ V(MipsDiv) \
+ V(MipsDivU) \
+ V(MipsMod) \
+ V(MipsModU) \
+ V(MipsAnd) \
+ V(MipsOr) \
+ V(MipsNor) \
+ V(MipsXor) \
+ V(MipsClz) \
+ V(MipsCtz) \
+ V(MipsPopcnt) \
+ V(MipsLsa) \
+ V(MipsShl) \
+ V(MipsShr) \
+ V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
+ V(MipsExt) \
+ V(MipsIns) \
+ V(MipsRor) \
+ V(MipsMov) \
+ V(MipsTst) \
+ V(MipsCmp) \
+ V(MipsCmpS) \
+ V(MipsAddS) \
+ V(MipsSubS) \
+ V(MipsMulS) \
+ V(MipsDivS) \
+ V(MipsAbsS) \
+ V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
+ V(MipsCmpD) \
+ V(MipsAddD) \
+ V(MipsSubD) \
+ V(MipsMulD) \
+ V(MipsDivD) \
+ V(MipsModD) \
+ V(MipsAbsD) \
+ V(MipsSqrtD) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
+ V(MipsNegS) \
+ V(MipsNegD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
+ V(MipsMulPair) \
+ V(MipsMaddS) \
+ V(MipsMaddD) \
+ V(MipsMsubS) \
+ V(MipsMsubD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
+ V(MipsFloat64RoundDown) \
+ V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
+ V(MipsCvtSD) \
+ V(MipsCvtDS) \
+ V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
+ V(MipsTruncUwD) \
+ V(MipsTruncUwS) \
+ V(MipsCvtDW) \
+ V(MipsCvtDUw) \
+ V(MipsCvtSW) \
+ V(MipsCvtSUw) \
+ V(MipsLb) \
+ V(MipsLbu) \
+ V(MipsSb) \
+ V(MipsLh) \
+ V(MipsUlh) \
+ V(MipsLhu) \
+ V(MipsUlhu) \
+ V(MipsSh) \
+ V(MipsUsh) \
+ V(MipsLw) \
+ V(MipsUlw) \
+ V(MipsSw) \
+ V(MipsUsw) \
+ V(MipsLwc1) \
+ V(MipsUlwc1) \
+ V(MipsSwc1) \
+ V(MipsUswc1) \
+ V(MipsLdc1) \
+ V(MipsUldc1) \
+ V(MipsSdc1) \
+ V(MipsUsdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64SilenceNaN) \
+ V(MipsFloat32Max) \
+ V(MipsFloat64Max) \
+ V(MipsFloat32Min) \
+ V(MipsFloat64Min) \
+ V(MipsPush) \
+ V(MipsPeek) \
+ V(MipsStoreToStackSlot) \
+ V(MipsByteSwap32) \
+ V(MipsStackClaim) \
+ V(MipsSeb) \
+ V(MipsSeh) \
+ V(MipsSync) \
+ V(MipsS128Zero) \
+ V(MipsI32x4Splat) \
+ V(MipsI32x4ExtractLane) \
+ V(MipsI32x4ReplaceLane) \
+ V(MipsI32x4Add) \
+ V(MipsI32x4Sub) \
+ V(MipsF64x2Abs) \
+ V(MipsF64x2Neg) \
+ V(MipsF64x2Sqrt) \
+ V(MipsF64x2Add) \
+ V(MipsF64x2Sub) \
+ V(MipsF64x2Mul) \
+ V(MipsF64x2Div) \
+ V(MipsF64x2Min) \
+ V(MipsF64x2Max) \
+ V(MipsF64x2Eq) \
+ V(MipsF64x2Ne) \
+ V(MipsF64x2Lt) \
+ V(MipsF64x2Le) \
+ V(MipsF64x2Pmin) \
+ V(MipsF64x2Pmax) \
+ V(MipsF64x2Ceil) \
+ V(MipsF64x2Floor) \
+ V(MipsF64x2Trunc) \
+ V(MipsF64x2NearestInt) \
+ V(MipsF64x2ConvertLowI32x4S) \
+ V(MipsF64x2ConvertLowI32x4U) \
+ V(MipsF64x2PromoteLowF32x4) \
+ V(MipsI64x2Add) \
+ V(MipsI64x2Sub) \
+ V(MipsI64x2Mul) \
+ V(MipsI64x2Neg) \
+ V(MipsI64x2Shl) \
+ V(MipsI64x2ShrS) \
+ V(MipsI64x2ShrU) \
+ V(MipsI64x2BitMask) \
+ V(MipsI64x2Eq) \
+ V(MipsI64x2Ne) \
+ V(MipsI64x2GtS) \
+ V(MipsI64x2GeS) \
+ V(MipsI64x2Abs) \
+ V(MipsI64x2SConvertI32x4Low) \
+ V(MipsI64x2SConvertI32x4High) \
+ V(MipsI64x2UConvertI32x4Low) \
+ V(MipsI64x2UConvertI32x4High) \
+ V(MipsI64x2ExtMulLowI32x4S) \
+ V(MipsI64x2ExtMulHighI32x4S) \
+ V(MipsI64x2ExtMulLowI32x4U) \
+ V(MipsI64x2ExtMulHighI32x4U) \
+ V(MipsF32x4Splat) \
+ V(MipsF32x4ExtractLane) \
+ V(MipsF32x4ReplaceLane) \
+ V(MipsF32x4SConvertI32x4) \
+ V(MipsF32x4UConvertI32x4) \
+ V(MipsF32x4DemoteF64x2Zero) \
+ V(MipsI32x4Mul) \
+ V(MipsI32x4MaxS) \
+ V(MipsI32x4MinS) \
+ V(MipsI32x4Eq) \
+ V(MipsI32x4Ne) \
+ V(MipsI32x4Shl) \
+ V(MipsI32x4ShrS) \
+ V(MipsI32x4ShrU) \
+ V(MipsI32x4MaxU) \
+ V(MipsI32x4MinU) \
+ V(MipsF64x2Splat) \
+ V(MipsF64x2ExtractLane) \
+ V(MipsF64x2ReplaceLane) \
+ V(MipsF32x4Abs) \
+ V(MipsF32x4Neg) \
+ V(MipsF32x4Sqrt) \
+ V(MipsF32x4RecipApprox) \
+ V(MipsF32x4RecipSqrtApprox) \
+ V(MipsF32x4Add) \
+ V(MipsF32x4Sub) \
+ V(MipsF32x4Mul) \
+ V(MipsF32x4Div) \
+ V(MipsF32x4Max) \
+ V(MipsF32x4Min) \
+ V(MipsF32x4Eq) \
+ V(MipsF32x4Ne) \
+ V(MipsF32x4Lt) \
+ V(MipsF32x4Le) \
+ V(MipsF32x4Pmin) \
+ V(MipsF32x4Pmax) \
+ V(MipsF32x4Ceil) \
+ V(MipsF32x4Floor) \
+ V(MipsF32x4Trunc) \
+ V(MipsF32x4NearestInt) \
+ V(MipsI32x4SConvertF32x4) \
+ V(MipsI32x4UConvertF32x4) \
+ V(MipsI32x4Neg) \
+ V(MipsI32x4GtS) \
+ V(MipsI32x4GeS) \
+ V(MipsI32x4GtU) \
+ V(MipsI32x4GeU) \
+ V(MipsI32x4Abs) \
+ V(MipsI32x4BitMask) \
+ V(MipsI32x4DotI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8S) \
+ V(MipsI32x4ExtMulHighI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8U) \
+ V(MipsI32x4ExtMulHighI16x8U) \
+ V(MipsI32x4TruncSatF64x2SZero) \
+ V(MipsI32x4TruncSatF64x2UZero) \
+ V(MipsI32x4ExtAddPairwiseI16x8S) \
+ V(MipsI32x4ExtAddPairwiseI16x8U) \
+ V(MipsI16x8Splat) \
+ V(MipsI16x8ExtractLaneU) \
+ V(MipsI16x8ExtractLaneS) \
+ V(MipsI16x8ReplaceLane) \
+ V(MipsI16x8Neg) \
+ V(MipsI16x8Shl) \
+ V(MipsI16x8ShrS) \
+ V(MipsI16x8ShrU) \
+ V(MipsI16x8Add) \
+ V(MipsI16x8AddSatS) \
+ V(MipsI16x8Sub) \
+ V(MipsI16x8SubSatS) \
+ V(MipsI16x8Mul) \
+ V(MipsI16x8MaxS) \
+ V(MipsI16x8MinS) \
+ V(MipsI16x8Eq) \
+ V(MipsI16x8Ne) \
+ V(MipsI16x8GtS) \
+ V(MipsI16x8GeS) \
+ V(MipsI16x8AddSatU) \
+ V(MipsI16x8SubSatU) \
+ V(MipsI16x8MaxU) \
+ V(MipsI16x8MinU) \
+ V(MipsI16x8GtU) \
+ V(MipsI16x8GeU) \
+ V(MipsI16x8RoundingAverageU) \
+ V(MipsI16x8Abs) \
+ V(MipsI16x8BitMask) \
+ V(MipsI16x8Q15MulRSatS) \
+ V(MipsI16x8ExtMulLowI8x16S) \
+ V(MipsI16x8ExtMulHighI8x16S) \
+ V(MipsI16x8ExtMulLowI8x16U) \
+ V(MipsI16x8ExtMulHighI8x16U) \
+ V(MipsI16x8ExtAddPairwiseI8x16S) \
+ V(MipsI16x8ExtAddPairwiseI8x16U) \
+ V(MipsI8x16Splat) \
+ V(MipsI8x16ExtractLaneU) \
+ V(MipsI8x16ExtractLaneS) \
+ V(MipsI8x16ReplaceLane) \
+ V(MipsI8x16Neg) \
+ V(MipsI8x16Shl) \
+ V(MipsI8x16ShrS) \
+ V(MipsI8x16Add) \
+ V(MipsI8x16AddSatS) \
+ V(MipsI8x16Sub) \
+ V(MipsI8x16SubSatS) \
+ V(MipsI8x16MaxS) \
+ V(MipsI8x16MinS) \
+ V(MipsI8x16Eq) \
+ V(MipsI8x16Ne) \
+ V(MipsI8x16GtS) \
+ V(MipsI8x16GeS) \
+ V(MipsI8x16ShrU) \
+ V(MipsI8x16AddSatU) \
+ V(MipsI8x16SubSatU) \
+ V(MipsI8x16MaxU) \
+ V(MipsI8x16MinU) \
+ V(MipsI8x16GtU) \
+ V(MipsI8x16GeU) \
+ V(MipsI8x16RoundingAverageU) \
+ V(MipsI8x16Abs) \
+ V(MipsI8x16Popcnt) \
+ V(MipsI8x16BitMask) \
+ V(MipsS128And) \
+ V(MipsS128Or) \
+ V(MipsS128Xor) \
+ V(MipsS128Not) \
+ V(MipsS128Select) \
+ V(MipsS128AndNot) \
+ V(MipsI64x2AllTrue) \
+ V(MipsI32x4AllTrue) \
+ V(MipsI16x8AllTrue) \
+ V(MipsI8x16AllTrue) \
+ V(MipsV128AnyTrue) \
+ V(MipsS32x4InterleaveRight) \
+ V(MipsS32x4InterleaveLeft) \
+ V(MipsS32x4PackEven) \
+ V(MipsS32x4PackOdd) \
+ V(MipsS32x4InterleaveEven) \
+ V(MipsS32x4InterleaveOdd) \
+ V(MipsS32x4Shuffle) \
+ V(MipsS16x8InterleaveRight) \
+ V(MipsS16x8InterleaveLeft) \
+ V(MipsS16x8PackEven) \
+ V(MipsS16x8PackOdd) \
+ V(MipsS16x8InterleaveEven) \
+ V(MipsS16x8InterleaveOdd) \
+ V(MipsS16x4Reverse) \
+ V(MipsS16x2Reverse) \
+ V(MipsS8x16InterleaveRight) \
+ V(MipsS8x16InterleaveLeft) \
+ V(MipsS8x16PackEven) \
+ V(MipsS8x16PackOdd) \
+ V(MipsS8x16InterleaveEven) \
+ V(MipsS8x16InterleaveOdd) \
+ V(MipsI8x16Shuffle) \
+ V(MipsI8x16Swizzle) \
+ V(MipsS8x16Concat) \
+ V(MipsS8x8Reverse) \
+ V(MipsS8x4Reverse) \
+ V(MipsS8x2Reverse) \
+ V(MipsS128Load8Splat) \
+ V(MipsS128Load16Splat) \
+ V(MipsS128Load32Splat) \
+ V(MipsS128Load64Splat) \
+ V(MipsS128Load8x8S) \
+ V(MipsS128Load8x8U) \
+ V(MipsS128Load16x4S) \
+ V(MipsS128Load16x4U) \
+ V(MipsS128Load32x2S) \
+ V(MipsS128Load32x2U) \
+ V(MipsMsaLd) \
+ V(MipsMsaSt) \
+ V(MipsI32x4SConvertI16x8Low) \
+ V(MipsI32x4SConvertI16x8High) \
+ V(MipsI32x4UConvertI16x8Low) \
+ V(MipsI32x4UConvertI16x8High) \
+ V(MipsI16x8SConvertI8x16Low) \
+ V(MipsI16x8SConvertI8x16High) \
+ V(MipsI16x8SConvertI32x4) \
+ V(MipsI16x8UConvertI32x4) \
+ V(MipsI16x8UConvertI8x16Low) \
+ V(MipsI16x8UConvertI8x16High) \
+ V(MipsI8x16SConvertI16x8) \
+ V(MipsI8x16UConvertI16x8) \
+ V(MipsWord32AtomicPairLoad) \
+ V(MipsWord32AtomicPairStore) \
+ V(MipsWord32AtomicPairAdd) \
+ V(MipsWord32AtomicPairSub) \
+ V(MipsWord32AtomicPairAnd) \
+ V(MipsWord32AtomicPairOr) \
+ V(MipsWord32AtomicPairXor) \
+ V(MipsWord32AtomicPairExchange) \
V(MipsWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 48635c9c15b..d59392b40a3 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1427,7 +1427,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
@@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
SubuLatency() + AdduLatency();
}
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1657,19 +1655,15 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
switch (op->representation()) {
case MachineRepresentation::kFloat32:
return Latency::SWC1 + SubuLatency(false);
- break;
case MachineRepresentation::kFloat64:
return Sdc1Latency() + SubuLatency(false);
- break;
default: {
UNREACHABLE();
- break;
}
}
} else {
return PushRegisterLatency();
}
- break;
}
case kMipsPeek: {
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1682,7 +1676,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsStackClaim:
return SubuLatency(false);
@@ -1699,41 +1692,40 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1;
case kMipsTst:
return AndLatency(instr->InputAt(1)->IsRegister());
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index c8236122461..39d1feef96f 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -278,9 +278,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoadTransform(Node* node) {
@@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1906,22 +1900,26 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic loads of tagged values with barriers.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1941,7 +1939,10 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic stores of tagged values with barriers.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1949,13 +1950,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -1983,15 +1987,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2021,15 +2025,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2091,12 +2095,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6fce103d247..5d6a745407a 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -95,7 +95,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -321,16 +320,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -577,31 +566,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -803,17 +767,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -864,7 +827,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -876,7 +840,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
- __ Sd(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ Sd(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ Sd(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -900,10 +871,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1065,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64And32:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or32:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) {
@@ -1085,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Nor32:
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} else {
DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
}
break;
case kMips64Xor:
@@ -1136,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ srl(i.OutputRegister(), i.OutputRegister(),
+ __ srl(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ sra(i.OutputRegister(), i.OutputRegister(),
+ __ sra(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
@@ -1646,30 +1605,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1679,27 +1632,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1919,149 +1866,172 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
- break;
- case kMips64Word64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kMips64Word64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kMips64Word64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ case kAtomicLoadWord32:
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord32)
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ else
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
break;
case kMips64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kMips64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kMips64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kMips64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ sll(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Addu)
- ATOMIC_BINOP_CASE(Sub, Subu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kMips64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Daddu)
- ATOMIC_BINOP_CASE(Sub, Dsubu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Addu, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -3851,7 +3821,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3864,7 +3833,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3904,104 +3872,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMips64Cmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMips64Tst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMips64Dadd:
- case kMips64Dsub: {
- // Check for overflow creates 1 or 0 for result.
- __ dsrl32(kScratchReg, i.OutputRegister(), 31);
- __ srl(kScratchReg2, i.OutputRegister(), 31);
- __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64DaddOvf:
- case kMips64DsubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64MulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64CmpS:
- case kMips64CmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -4340,7 +4210,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4568,7 +4437,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index e1b40a4be58..003b6bd6c21 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,419 +11,398 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64Word64AtomicLoadUint8) \
- V(Mips64Word64AtomicLoadUint16) \
- V(Mips64Word64AtomicLoadUint32) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord8) \
- V(Mips64Word64AtomicStoreWord16) \
- V(Mips64Word64AtomicStoreWord32) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint8) \
- V(Mips64Word64AtomicAddUint16) \
- V(Mips64Word64AtomicAddUint32) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint8) \
- V(Mips64Word64AtomicSubUint16) \
- V(Mips64Word64AtomicSubUint32) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint8) \
- V(Mips64Word64AtomicAndUint16) \
- V(Mips64Word64AtomicAndUint32) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint8) \
- V(Mips64Word64AtomicOrUint16) \
- V(Mips64Word64AtomicOrUint32) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint8) \
- V(Mips64Word64AtomicXorUint16) \
- V(Mips64Word64AtomicXorUint32) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint8) \
- V(Mips64Word64AtomicExchangeUint16) \
- V(Mips64Word64AtomicExchangeUint32) \
- V(Mips64Word64AtomicExchangeUint64) \
- V(Mips64Word64AtomicCompareExchangeUint8) \
- V(Mips64Word64AtomicCompareExchangeUint16) \
- V(Mips64Word64AtomicCompareExchangeUint32) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index c63e0aa3d36..734009ca309 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -375,9 +375,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S128Load32Zero:
case kMips64S128Load64Zero:
case kMips64S128LoadLane:
- case kMips64Word64AtomicLoadUint8:
- case kMips64Word64AtomicLoadUint16:
- case kMips64Word64AtomicLoadUint32:
case kMips64Word64AtomicLoadUint64:
return kIsLoadOperation;
@@ -400,37 +397,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Uswc1:
case kMips64Sync:
case kMips64S128StoreLane:
- case kMips64Word64AtomicStoreWord8:
- case kMips64Word64AtomicStoreWord16:
- case kMips64Word64AtomicStoreWord32:
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
- case kMips64Word64AtomicAddUint8:
- case kMips64Word64AtomicAddUint16:
- case kMips64Word64AtomicAddUint32:
case kMips64Word64AtomicAddUint64:
- case kMips64Word64AtomicSubUint8:
- case kMips64Word64AtomicSubUint16:
- case kMips64Word64AtomicSubUint32:
case kMips64Word64AtomicSubUint64:
- case kMips64Word64AtomicAndUint8:
- case kMips64Word64AtomicAndUint16:
- case kMips64Word64AtomicAndUint32:
case kMips64Word64AtomicAndUint64:
- case kMips64Word64AtomicOrUint8:
- case kMips64Word64AtomicOrUint16:
- case kMips64Word64AtomicOrUint32:
case kMips64Word64AtomicOrUint64:
- case kMips64Word64AtomicXorUint8:
- case kMips64Word64AtomicXorUint16:
- case kMips64Word64AtomicXorUint32:
case kMips64Word64AtomicXorUint64:
- case kMips64Word64AtomicExchangeUint8:
- case kMips64Word64AtomicExchangeUint16:
- case kMips64Word64AtomicExchangeUint32:
case kMips64Word64AtomicExchangeUint64:
- case kMips64Word64AtomicCompareExchangeUint8:
- case kMips64Word64AtomicCompareExchangeUint16:
- case kMips64Word64AtomicCompareExchangeUint32:
case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -1327,7 +1301,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
@@ -1352,8 +1326,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return DadduLatency(false) + AndLatency(false) + AssertLatency() +
DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1 + DsubuLatency() + DadduLatency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1740,35 +1712,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kMips64ByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kMips64AssertEqual:
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index bec7bbefdcb..93c123bd65c 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
- // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
- } else {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
+ outputs[output_count++] = g.DefineAsRegister(node);
DCHECK_NE(0u, input_count);
DCHECK_EQ(1u, output_count);
@@ -356,9 +349,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
+ opcode = kMips64Lw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -515,16 +508,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -860,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
- Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node()));
return;
@@ -1452,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // On MIPS64, int32 values are already sign-extended to 64 bits, so there
+ // is no need to sign-extend them here.
+ // However, when a host function is called from the simulator, a returned
+ // int32 value is not sign-extended to int64, because the simulator cannot
+ // tell whether the function returns an int32 or an int64.
+#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if ((value->opcode() == IrOpcode::kLoad ||
- value->opcode() == IrOpcode::kLoadImmutable) &&
- CanCover(node, value)) {
- // Generate sign-extending load.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
- InstructionCode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Lw;
- break;
- default:
- UNREACHABLE();
- }
- EmitLoad(this, value, opcode, node);
- } else {
+ if (value->opcode() == IrOpcode::kCall) {
Mips64OperandGenerator g(this);
- Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
+ return;
}
+#endif
+ EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) {
- // 32-bit operations will write their result in a 64 bit register,
- // clearing the top 32 bits of the destination register.
- case IrOpcode::kUint32Div:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh:
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
+ return sa > 0;
+ }
+ return false;
+ }
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1497,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
@@ -1513,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
+ IrOpcode::Value opcode = value->opcode();
+
+ if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ ArchOpcode arch_opcode =
+ opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, arch_opcode, node);
+ return;
+ }
+ }
+
if (ZeroExtendsWord32ToWord64(value)) {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
+
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -1534,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value);
if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence.
- Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ Emit(kMips64Dsar, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
@@ -1546,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
- Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0), g.TempImmediate(32));
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
@@ -1842,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
+ opcode = kMips64Ulw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -2041,10 +2046,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -2144,12 +2152,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -2157,35 +2196,93 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- addr_reg, g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), addr_reg, g.TempImmediate(0),
- g.UseRegisterOrImmediateZero(value));
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kMips64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kMips64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2203,12 +2300,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2228,12 +2326,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2252,7 +2351,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2615,163 +2715,93 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2792,15 +2822,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2825,14 +2854,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
- kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kMips64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index cf324353f2c..b91f6209f21 100644
--- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@@ -575,69 +564,35 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst, \
- ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst, \
- input_ext) \
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type) \
do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ input_ext(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
- store_inst, ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ ext_instr(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
+ auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
+ if (std::is_signed<_type>::value) { \
+ switch (sizeof(_type)) { \
+ case 1: \
+ __ extsb(dst, lhs); \
+ break; \
+ case 2: \
+ __ extsh(dst, lhs); \
+ break; \
+ case 4: \
+ __ extsw(dst, lhs); \
+ break; \
+ case 8: \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ __ bin_inst(dst, dst, rhs); \
+ } else { \
+ __ bin_inst(dst, lhs, rhs); \
+ } \
+ }; \
+ MemOperand dst_operand = \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
+ kScratchReg, bin_op); \
+ break; \
} while (false)
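
The rewritten ASSEMBLE_ATOMIC_BINOP macro above folds the former load/store/sign-extend variants into one: the sign-extension step is chosen from the C++ element type `_type` via std::is_signed and sizeof, and the larx/stcx. retry loop itself is delegated to the macro assembler's templated AtomicOps helper. A minimal standalone sketch of that compile-time dispatch (the enum and helper names below are illustrative, not V8 identifiers):

// Hypothetical sketch: select the sign-extension instruction from the element
// type, as the macro does with std::is_signed<_type> and sizeof(_type).
#include <cstdint>
#include <type_traits>

enum class Ext { kNone, kExtsb, kExtsh, kExtsw };

template <typename T>
constexpr Ext SignExtensionFor() {
  if (!std::is_signed<T>::value) return Ext::kNone;  // unsigned: no extend
  switch (sizeof(T)) {
    case 1: return Ext::kExtsb;   // sign-extend byte
    case 2: return Ext::kExtsh;   // sign-extend halfword
    case 4: return Ext::kExtsw;   // sign-extend word
    case 8: return Ext::kNone;    // already full width
    default: return Ext::kNone;   // unreachable for supported element types
  }
}

static_assert(SignExtensionFor<int16_t>() == Ext::kExtsh, "signed halfword");
static_assert(SignExtensionFor<uint32_t>() == Ext::kNone, "unsigned word");
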
void CodeGenerator::AssembleDeconstructFrame() {
@@ -777,25 +732,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, cr0);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = kScratchReg;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ li(scratch, Operand::Zero());
- __ notx(kSpeculationPoisonRegister, scratch);
- __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -918,8 +854,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -962,8 +900,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
case kArchCallCFunction: {
- int misc_field = MiscField::decode(instr->opcode());
- int num_parameters = misc_field;
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const fp_param_field = FPParamField::decode(instr->opcode());
+ int num_fp_parameters = fp_param_field;
bool has_function_descriptor = false;
int offset = 20 * kInstrSize;
@@ -984,10 +923,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
- int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
- num_parameters = kNumParametersMask & misc_field;
+ int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1;
+ num_fp_parameters = kNumFPParametersMask & fp_param_field;
has_function_descriptor =
- (misc_field & kHasFunctionDescriptorBitMask) != 0;
+ (fp_param_field & kHasFunctionDescriptorBitMask) != 0;
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
@@ -1010,10 +949,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, has_function_descriptor);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, has_function_descriptor);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
}
// TODO(miladfar): In the above block, kScratchReg must be populated with
// the strictly-correct PC, which is the return address at this spot. The
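
The kArchPrepareCallCFunction and kArchCallCFunction cases above now carry two counts, general-purpose and floating-point parameters, in separate ParamField and FPParamField bit fields of the opcode word instead of a single MiscField count (on AIX the function-descriptor bit still rides in the FP field). A small self-contained sketch of that kind of packing; the field positions and widths here are assumptions, not the real V8 layout:

// Two small counts packed into one 32-bit instruction word and decoded
// independently at code-generation time. Shifts/widths are illustrative.
#include <cstdint>

template <int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(uint32_t v) { return (v << kShift) & kMask; }
  static constexpr uint32_t decode(uint32_t word) { return (word & kMask) >> kShift; }
};

using DemoParamField = BitField<22, 5>;    // assumed position/width
using DemoFPParamField = BitField<27, 5>;  // assumed position/width

static_assert(DemoParamField::decode(DemoParamField::encode(3) |
                                     DemoFPParamField::encode(2)) == 3, "");
static_assert(DemoFPParamField::decode(DemoParamField::encode(3) |
                                       DemoFPParamField::encode(2)) == 2, "");
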
@@ -1056,13 +997,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r4);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -1164,10 +1105,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()), r0);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kPPC_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -1953,10 +1890,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
break;
case kPPC_BitcastInt32ToFloat32:
- __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0), ip);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_BitcastDoubleToInt64:
@@ -1968,33 +1906,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -2051,78 +1982,98 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
- case kWord32AtomicLoadInt8:
- case kPPC_AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
+ case kAtomicLoadInt8:
+ case kAtomicLoadInt16:
UNREACHABLE();
- case kWord32AtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
- __ extsb(i.OutputRegister(0), i.OutputRegister(0));
+ case kAtomicExchangeInt8:
+ __ AtomicExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
+ __ AtomicExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
- case kWord32AtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
- __ extsh(i.OutputRegister(0), i.OutputRegister(0));
+ case kAtomicExchangeInt16:
+ __ AtomicExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
+ __ AtomicExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
+ __ AtomicExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord64:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
+ __ AtomicExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
- case kWord32AtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
+ case kAtomicCompareExchangeInt8:
+ __ AtomicCompareExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
+ __ AtomicCompareExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
- case kWord32AtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
+ case kAtomicCompareExchangeInt16:
+ __ AtomicCompareExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lharx, sthcx, ZeroExtHalfWord);
+ __ AtomicCompareExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32);
+ __ AtomicCompareExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, ldarx, stdcx, mr);
- break;
-
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kPPC_Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
- break; \
- case kPPC_Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
- break; \
- case kPPC_Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
- break; \
- case kPPC_Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
- break; \
- case kPPC_Atomic##op##Int32: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
- break; \
- case kPPC_Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
- break; \
- case kPPC_Atomic##op##Int64: \
- case kPPC_Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
+ __ AtomicCompareExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
+ break;
+
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kPPC_Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int8_t); \
+ break; \
+ case kPPC_Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint8_t); \
+ break; \
+ case kPPC_Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int16_t); \
+ break; \
+ case kPPC_Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint16_t); \
+ break; \
+ case kPPC_Atomic##op##Int32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int32_t); \
+ break; \
+ case kPPC_Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint32_t); \
+ break; \
+ case kPPC_Atomic##op##Int64: \
+ case kPPC_Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint64_t); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
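
The per-width ASSEMBLE_ATOMIC_EXCHANGE_INTEGER and ..._COMPARE_EXCHANGE macros are gone; each case above now calls the macro assembler's templated AtomicExchange<T> / AtomicCompareExchange<T> helpers, parameterized on the C element type. A semantics-only model of what the compare-exchange sequence computes (plain C++, not the assembler helper, ignoring the retry on a lost reservation):

// Observable effect of the generated l*arx / st*cx. loop for an element of
// type T: return the old value, store the new one only if the old matched.
template <typename T>
T CompareExchangeModel(T* slot, T expected, T new_value) {
  T old_value = *slot;                           // reserved load
  if (old_value == expected) *slot = new_value;  // conditional store
  return old_value;                              // value left in the output register
}
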
@@ -2135,6 +2086,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
Register temp1 = r0;
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brw(output, input);
+ break;
+ }
__ rotlwi(temp1, input, 8);
__ rlwimi(temp1, input, 24, 0, 7);
__ rlwimi(temp1, input, 24, 16, 23);
@@ -2143,7 +2098,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev32: {
ASSEMBLE_LOAD_INTEGER_RR(lwbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev32: {
@@ -2156,6 +2110,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp1 = r0;
Register temp2 = kScratchReg;
Register temp3 = i.TempRegister(0);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brd(output, input);
+ break;
+ }
__ rldicl(temp1, input, 32, 32);
__ rotlwi(temp2, input, 8);
__ rlwimi(temp2, input, 24, 0, 7);
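
Both byte-reverse cases above gain a fast path: when CpuFeatures reports PPC_10_PLUS, a single brw (32-bit) or brd (64-bit) byte-reverse instruction is emitted, and the existing rotate-and-insert sequence remains as the fallback. The same shape in portable C++; the feature probe below is a hypothetical stand-in, not a V8 API:

#include <cstdint>

bool CpuHasByteReverse();  // assumed runtime feature check (placeholder)

uint32_t ByteRev32(uint32_t x) {
  if (CpuHasByteReverse()) {
    return __builtin_bswap32(x);  // single-instruction path (brw on Power10)
  }
  // Portable fallback, analogous to the rlwimi/rotlwi sequence.
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8) | (x >> 24);
}
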
@@ -2169,7 +2127,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev64: {
ASSEMBLE_LOAD_INTEGER_RR(ldbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev64: {
@@ -2186,7 +2143,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_F32x4Splat: {
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0),
+ kScratchDoubleReg);
__ mtvsrd(dst, kScratchReg);
__ vspltw(dst, dst, Operand(1));
break;
@@ -2229,7 +2187,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
__ mfvsrd(kScratchReg, kScratchSimd128Reg);
- __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
+ __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg, ip);
break;
}
case kPPC_I64x2ExtractLane: {
@@ -2292,7 +2250,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2), kScratchDoubleReg);
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
__ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
} else {
@@ -3522,7 +3480,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ vextractub(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand(15 - i.InputInt8(3)));
+ Operand(15 - i.InputUint8(3)));
__ stxsibx(kScratchSimd128Reg, operand);
break;
}
@@ -3799,21 +3757,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- ArchOpcode op = instr->arch_opcode();
- condition = NegateFlagsCondition(condition);
- __ li(kScratchReg, Operand::Zero());
- __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
- kScratchReg, kSpeculationPoisonRegister, cr0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3940,7 +3883,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
break;
default:
UNREACHABLE();
- break;
}
} else {
if (reg_value != 0) __ li(reg, Operand::Zero());
@@ -4079,7 +4021,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4353,7 +4294,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 64f532a52b0..4f9003257f6 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -11,417 +11,411 @@ namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(PPC_Peek) \
- V(PPC_Sync) \
- V(PPC_And) \
- V(PPC_AndComplement) \
- V(PPC_Or) \
- V(PPC_OrComplement) \
- V(PPC_Xor) \
- V(PPC_ShiftLeft32) \
- V(PPC_ShiftLeft64) \
- V(PPC_ShiftLeftPair) \
- V(PPC_ShiftRight32) \
- V(PPC_ShiftRight64) \
- V(PPC_ShiftRightPair) \
- V(PPC_ShiftRightAlg32) \
- V(PPC_ShiftRightAlg64) \
- V(PPC_ShiftRightAlgPair) \
- V(PPC_RotRight32) \
- V(PPC_RotRight64) \
- V(PPC_Not) \
- V(PPC_RotLeftAndMask32) \
- V(PPC_RotLeftAndClear64) \
- V(PPC_RotLeftAndClearLeft64) \
- V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add32) \
- V(PPC_Add64) \
- V(PPC_AddWithOverflow32) \
- V(PPC_AddPair) \
- V(PPC_AddDouble) \
- V(PPC_Sub) \
- V(PPC_SubWithOverflow32) \
- V(PPC_SubPair) \
- V(PPC_SubDouble) \
- V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
- V(PPC_Mul64) \
- V(PPC_MulHigh32) \
- V(PPC_MulHighU32) \
- V(PPC_MulPair) \
- V(PPC_MulDouble) \
- V(PPC_Div32) \
- V(PPC_Div64) \
- V(PPC_DivU32) \
- V(PPC_DivU64) \
- V(PPC_DivDouble) \
- V(PPC_Mod32) \
- V(PPC_Mod64) \
- V(PPC_ModU32) \
- V(PPC_ModU64) \
- V(PPC_ModDouble) \
- V(PPC_Neg) \
- V(PPC_NegDouble) \
- V(PPC_SqrtDouble) \
- V(PPC_FloorDouble) \
- V(PPC_CeilDouble) \
- V(PPC_TruncateDouble) \
- V(PPC_RoundDouble) \
- V(PPC_MaxDouble) \
- V(PPC_MinDouble) \
- V(PPC_AbsDouble) \
- V(PPC_Cntlz32) \
- V(PPC_Cntlz64) \
- V(PPC_Popcnt32) \
- V(PPC_Popcnt64) \
- V(PPC_Cmp32) \
- V(PPC_Cmp64) \
- V(PPC_CmpDouble) \
- V(PPC_Tst32) \
- V(PPC_Tst64) \
- V(PPC_Push) \
- V(PPC_PushFrame) \
- V(PPC_StoreToStackSlot) \
- V(PPC_ExtendSignWord8) \
- V(PPC_ExtendSignWord16) \
- V(PPC_ExtendSignWord32) \
- V(PPC_Uint32ToUint64) \
- V(PPC_Int64ToInt32) \
- V(PPC_Int64ToFloat32) \
- V(PPC_Int64ToDouble) \
- V(PPC_Uint64ToFloat32) \
- V(PPC_Uint64ToDouble) \
- V(PPC_Int32ToFloat32) \
- V(PPC_Int32ToDouble) \
- V(PPC_Uint32ToFloat32) \
- V(PPC_Float32ToInt32) \
- V(PPC_Float32ToUint32) \
- V(PPC_Uint32ToDouble) \
- V(PPC_Float32ToDouble) \
- V(PPC_Float64SilenceNaN) \
- V(PPC_DoubleToInt32) \
- V(PPC_DoubleToUint32) \
- V(PPC_DoubleToInt64) \
- V(PPC_DoubleToUint64) \
- V(PPC_DoubleToFloat32) \
- V(PPC_DoubleExtractLowWord32) \
- V(PPC_DoubleExtractHighWord32) \
- V(PPC_DoubleInsertLowWord32) \
- V(PPC_DoubleInsertHighWord32) \
- V(PPC_DoubleConstruct) \
- V(PPC_BitcastInt32ToFloat32) \
- V(PPC_BitcastFloat32ToInt32) \
- V(PPC_BitcastInt64ToDouble) \
- V(PPC_BitcastDoubleToInt64) \
- V(PPC_LoadWordS8) \
- V(PPC_LoadWordU8) \
- V(PPC_LoadWordS16) \
- V(PPC_LoadWordU16) \
- V(PPC_LoadWordS32) \
- V(PPC_LoadWordU32) \
- V(PPC_LoadByteRev32) \
- V(PPC_LoadWord64) \
- V(PPC_LoadByteRev64) \
- V(PPC_LoadFloat32) \
- V(PPC_LoadDouble) \
- V(PPC_LoadSimd128) \
- V(PPC_LoadReverseSimd128RR) \
- V(PPC_StoreWord8) \
- V(PPC_StoreWord16) \
- V(PPC_StoreWord32) \
- V(PPC_StoreByteRev32) \
- V(PPC_StoreWord64) \
- V(PPC_StoreByteRev64) \
- V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_StoreSimd128) \
- V(PPC_ByteRev32) \
- V(PPC_ByteRev64) \
- V(PPC_CompressSigned) \
- V(PPC_CompressPointer) \
- V(PPC_CompressAny) \
- V(PPC_AtomicStoreUint8) \
- V(PPC_AtomicStoreUint16) \
- V(PPC_AtomicStoreWord32) \
- V(PPC_AtomicStoreWord64) \
- V(PPC_AtomicLoadUint8) \
- V(PPC_AtomicLoadUint16) \
- V(PPC_AtomicLoadWord32) \
- V(PPC_AtomicLoadWord64) \
- V(PPC_AtomicExchangeUint8) \
- V(PPC_AtomicExchangeUint16) \
- V(PPC_AtomicExchangeWord32) \
- V(PPC_AtomicExchangeWord64) \
- V(PPC_AtomicCompareExchangeUint8) \
- V(PPC_AtomicCompareExchangeUint16) \
- V(PPC_AtomicCompareExchangeWord32) \
- V(PPC_AtomicCompareExchangeWord64) \
- V(PPC_AtomicAddUint8) \
- V(PPC_AtomicAddUint16) \
- V(PPC_AtomicAddUint32) \
- V(PPC_AtomicAddUint64) \
- V(PPC_AtomicAddInt8) \
- V(PPC_AtomicAddInt16) \
- V(PPC_AtomicAddInt32) \
- V(PPC_AtomicAddInt64) \
- V(PPC_AtomicSubUint8) \
- V(PPC_AtomicSubUint16) \
- V(PPC_AtomicSubUint32) \
- V(PPC_AtomicSubUint64) \
- V(PPC_AtomicSubInt8) \
- V(PPC_AtomicSubInt16) \
- V(PPC_AtomicSubInt32) \
- V(PPC_AtomicSubInt64) \
- V(PPC_AtomicAndUint8) \
- V(PPC_AtomicAndUint16) \
- V(PPC_AtomicAndUint32) \
- V(PPC_AtomicAndUint64) \
- V(PPC_AtomicAndInt8) \
- V(PPC_AtomicAndInt16) \
- V(PPC_AtomicAndInt32) \
- V(PPC_AtomicAndInt64) \
- V(PPC_AtomicOrUint8) \
- V(PPC_AtomicOrUint16) \
- V(PPC_AtomicOrUint32) \
- V(PPC_AtomicOrUint64) \
- V(PPC_AtomicOrInt8) \
- V(PPC_AtomicOrInt16) \
- V(PPC_AtomicOrInt32) \
- V(PPC_AtomicOrInt64) \
- V(PPC_AtomicXorUint8) \
- V(PPC_AtomicXorUint16) \
- V(PPC_AtomicXorUint32) \
- V(PPC_AtomicXorUint64) \
- V(PPC_AtomicXorInt8) \
- V(PPC_AtomicXorInt16) \
- V(PPC_AtomicXorInt32) \
- V(PPC_AtomicXorInt64) \
- V(PPC_F64x2Splat) \
- V(PPC_F64x2ExtractLane) \
- V(PPC_F64x2ReplaceLane) \
- V(PPC_F64x2Add) \
- V(PPC_F64x2Sub) \
- V(PPC_F64x2Mul) \
- V(PPC_F64x2Eq) \
- V(PPC_F64x2Ne) \
- V(PPC_F64x2Le) \
- V(PPC_F64x2Lt) \
- V(PPC_F64x2Abs) \
- V(PPC_F64x2Neg) \
- V(PPC_F64x2Sqrt) \
- V(PPC_F64x2Qfma) \
- V(PPC_F64x2Qfms) \
- V(PPC_F64x2Div) \
- V(PPC_F64x2Min) \
- V(PPC_F64x2Max) \
- V(PPC_F64x2Ceil) \
- V(PPC_F64x2Floor) \
- V(PPC_F64x2Trunc) \
- V(PPC_F64x2Pmin) \
- V(PPC_F64x2Pmax) \
- V(PPC_F64x2ConvertLowI32x4S) \
- V(PPC_F64x2ConvertLowI32x4U) \
- V(PPC_F64x2PromoteLowF32x4) \
- V(PPC_F32x4Splat) \
- V(PPC_F32x4ExtractLane) \
- V(PPC_F32x4ReplaceLane) \
- V(PPC_F32x4Add) \
- V(PPC_F32x4Sub) \
- V(PPC_F32x4Mul) \
- V(PPC_F32x4Eq) \
- V(PPC_F32x4Ne) \
- V(PPC_F32x4Lt) \
- V(PPC_F32x4Le) \
- V(PPC_F32x4Abs) \
- V(PPC_F32x4Neg) \
- V(PPC_F32x4RecipApprox) \
- V(PPC_F32x4RecipSqrtApprox) \
- V(PPC_F32x4Sqrt) \
- V(PPC_F32x4SConvertI32x4) \
- V(PPC_F32x4UConvertI32x4) \
- V(PPC_F32x4Div) \
- V(PPC_F32x4Min) \
- V(PPC_F32x4Max) \
- V(PPC_F32x4Ceil) \
- V(PPC_F32x4Floor) \
- V(PPC_F32x4Trunc) \
- V(PPC_F32x4Pmin) \
- V(PPC_F32x4Pmax) \
- V(PPC_F32x4Qfma) \
- V(PPC_F32x4Qfms) \
- V(PPC_F32x4DemoteF64x2Zero) \
- V(PPC_I64x2Splat) \
- V(PPC_I64x2ExtractLane) \
- V(PPC_I64x2ReplaceLane) \
- V(PPC_I64x2Add) \
- V(PPC_I64x2Sub) \
- V(PPC_I64x2Mul) \
- V(PPC_I64x2Eq) \
- V(PPC_I64x2Ne) \
- V(PPC_I64x2GtS) \
- V(PPC_I64x2GeS) \
- V(PPC_I64x2Shl) \
- V(PPC_I64x2ShrS) \
- V(PPC_I64x2ShrU) \
- V(PPC_I64x2Neg) \
- V(PPC_I64x2BitMask) \
- V(PPC_I64x2SConvertI32x4Low) \
- V(PPC_I64x2SConvertI32x4High) \
- V(PPC_I64x2UConvertI32x4Low) \
- V(PPC_I64x2UConvertI32x4High) \
- V(PPC_I64x2ExtMulLowI32x4S) \
- V(PPC_I64x2ExtMulHighI32x4S) \
- V(PPC_I64x2ExtMulLowI32x4U) \
- V(PPC_I64x2ExtMulHighI32x4U) \
- V(PPC_I64x2Abs) \
- V(PPC_I32x4Splat) \
- V(PPC_I32x4ExtractLane) \
- V(PPC_I32x4ReplaceLane) \
- V(PPC_I32x4Add) \
- V(PPC_I32x4Sub) \
- V(PPC_I32x4Mul) \
- V(PPC_I32x4MinS) \
- V(PPC_I32x4MinU) \
- V(PPC_I32x4MaxS) \
- V(PPC_I32x4MaxU) \
- V(PPC_I32x4Eq) \
- V(PPC_I32x4Ne) \
- V(PPC_I32x4GtS) \
- V(PPC_I32x4GeS) \
- V(PPC_I32x4GtU) \
- V(PPC_I32x4GeU) \
- V(PPC_I32x4Shl) \
- V(PPC_I32x4ShrS) \
- V(PPC_I32x4ShrU) \
- V(PPC_I32x4Neg) \
- V(PPC_I32x4Abs) \
- V(PPC_I32x4SConvertF32x4) \
- V(PPC_I32x4UConvertF32x4) \
- V(PPC_I32x4SConvertI16x8Low) \
- V(PPC_I32x4SConvertI16x8High) \
- V(PPC_I32x4UConvertI16x8Low) \
- V(PPC_I32x4UConvertI16x8High) \
- V(PPC_I32x4BitMask) \
- V(PPC_I32x4DotI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8U) \
- V(PPC_I32x4ExtMulLowI16x8S) \
- V(PPC_I32x4ExtMulHighI16x8S) \
- V(PPC_I32x4ExtMulLowI16x8U) \
- V(PPC_I32x4ExtMulHighI16x8U) \
- V(PPC_I32x4TruncSatF64x2SZero) \
- V(PPC_I32x4TruncSatF64x2UZero) \
- V(PPC_I16x8Splat) \
- V(PPC_I16x8ExtractLaneU) \
- V(PPC_I16x8ExtractLaneS) \
- V(PPC_I16x8ReplaceLane) \
- V(PPC_I16x8Add) \
- V(PPC_I16x8Sub) \
- V(PPC_I16x8Mul) \
- V(PPC_I16x8MinS) \
- V(PPC_I16x8MinU) \
- V(PPC_I16x8MaxS) \
- V(PPC_I16x8MaxU) \
- V(PPC_I16x8Eq) \
- V(PPC_I16x8Ne) \
- V(PPC_I16x8GtS) \
- V(PPC_I16x8GeS) \
- V(PPC_I16x8GtU) \
- V(PPC_I16x8GeU) \
- V(PPC_I16x8Shl) \
- V(PPC_I16x8ShrS) \
- V(PPC_I16x8ShrU) \
- V(PPC_I16x8Neg) \
- V(PPC_I16x8Abs) \
- V(PPC_I16x8SConvertI32x4) \
- V(PPC_I16x8UConvertI32x4) \
- V(PPC_I16x8SConvertI8x16Low) \
- V(PPC_I16x8SConvertI8x16High) \
- V(PPC_I16x8UConvertI8x16Low) \
- V(PPC_I16x8UConvertI8x16High) \
- V(PPC_I16x8AddSatS) \
- V(PPC_I16x8SubSatS) \
- V(PPC_I16x8AddSatU) \
- V(PPC_I16x8SubSatU) \
- V(PPC_I16x8RoundingAverageU) \
- V(PPC_I16x8BitMask) \
- V(PPC_I16x8ExtAddPairwiseI8x16S) \
- V(PPC_I16x8ExtAddPairwiseI8x16U) \
- V(PPC_I16x8Q15MulRSatS) \
- V(PPC_I16x8ExtMulLowI8x16S) \
- V(PPC_I16x8ExtMulHighI8x16S) \
- V(PPC_I16x8ExtMulLowI8x16U) \
- V(PPC_I16x8ExtMulHighI8x16U) \
- V(PPC_I8x16Splat) \
- V(PPC_I8x16ExtractLaneU) \
- V(PPC_I8x16ExtractLaneS) \
- V(PPC_I8x16ReplaceLane) \
- V(PPC_I8x16Add) \
- V(PPC_I8x16Sub) \
- V(PPC_I8x16MinS) \
- V(PPC_I8x16MinU) \
- V(PPC_I8x16MaxS) \
- V(PPC_I8x16MaxU) \
- V(PPC_I8x16Eq) \
- V(PPC_I8x16Ne) \
- V(PPC_I8x16GtS) \
- V(PPC_I8x16GeS) \
- V(PPC_I8x16GtU) \
- V(PPC_I8x16GeU) \
- V(PPC_I8x16Shl) \
- V(PPC_I8x16ShrS) \
- V(PPC_I8x16ShrU) \
- V(PPC_I8x16Neg) \
- V(PPC_I8x16Abs) \
- V(PPC_I8x16SConvertI16x8) \
- V(PPC_I8x16UConvertI16x8) \
- V(PPC_I8x16AddSatS) \
- V(PPC_I8x16SubSatS) \
- V(PPC_I8x16AddSatU) \
- V(PPC_I8x16SubSatU) \
- V(PPC_I8x16RoundingAverageU) \
- V(PPC_I8x16Shuffle) \
- V(PPC_I8x16Swizzle) \
- V(PPC_I8x16BitMask) \
- V(PPC_I8x16Popcnt) \
- V(PPC_I64x2AllTrue) \
- V(PPC_I32x4AllTrue) \
- V(PPC_I16x8AllTrue) \
- V(PPC_I8x16AllTrue) \
- V(PPC_V128AnyTrue) \
- V(PPC_S128And) \
- V(PPC_S128Or) \
- V(PPC_S128Xor) \
- V(PPC_S128Const) \
- V(PPC_S128Zero) \
- V(PPC_S128AllOnes) \
- V(PPC_S128Not) \
- V(PPC_S128Select) \
- V(PPC_S128AndNot) \
- V(PPC_S128Load8Splat) \
- V(PPC_S128Load16Splat) \
- V(PPC_S128Load32Splat) \
- V(PPC_S128Load64Splat) \
- V(PPC_S128Load8x8S) \
- V(PPC_S128Load8x8U) \
- V(PPC_S128Load16x4S) \
- V(PPC_S128Load16x4U) \
- V(PPC_S128Load32x2S) \
- V(PPC_S128Load32x2U) \
- V(PPC_S128Load32Zero) \
- V(PPC_S128Load64Zero) \
- V(PPC_S128Load8Lane) \
- V(PPC_S128Load16Lane) \
- V(PPC_S128Load32Lane) \
- V(PPC_S128Load64Lane) \
- V(PPC_S128Store8Lane) \
- V(PPC_S128Store16Lane) \
- V(PPC_S128Store32Lane) \
- V(PPC_S128Store64Lane) \
- V(PPC_StoreCompressTagged) \
- V(PPC_LoadDecompressTaggedSigned) \
- V(PPC_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(PPC_Peek) \
+ V(PPC_Sync) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
+ V(PPC_Float32ToInt32) \
+ V(PPC_Float32ToUint32) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
+ V(PPC_LoadByteRev32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadByteRev64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_LoadSimd128) \
+ V(PPC_LoadReverseSimd128RR) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreByteRev32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreByteRev64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble) \
+ V(PPC_StoreSimd128) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64) \
+ V(PPC_AtomicExchangeUint8) \
+ V(PPC_AtomicExchangeUint16) \
+ V(PPC_AtomicExchangeWord32) \
+ V(PPC_AtomicExchangeWord64) \
+ V(PPC_AtomicCompareExchangeUint8) \
+ V(PPC_AtomicCompareExchangeUint16) \
+ V(PPC_AtomicCompareExchangeWord32) \
+ V(PPC_AtomicCompareExchangeWord64) \
+ V(PPC_AtomicAddUint8) \
+ V(PPC_AtomicAddUint16) \
+ V(PPC_AtomicAddUint32) \
+ V(PPC_AtomicAddUint64) \
+ V(PPC_AtomicAddInt8) \
+ V(PPC_AtomicAddInt16) \
+ V(PPC_AtomicAddInt32) \
+ V(PPC_AtomicAddInt64) \
+ V(PPC_AtomicSubUint8) \
+ V(PPC_AtomicSubUint16) \
+ V(PPC_AtomicSubUint32) \
+ V(PPC_AtomicSubUint64) \
+ V(PPC_AtomicSubInt8) \
+ V(PPC_AtomicSubInt16) \
+ V(PPC_AtomicSubInt32) \
+ V(PPC_AtomicSubInt64) \
+ V(PPC_AtomicAndUint8) \
+ V(PPC_AtomicAndUint16) \
+ V(PPC_AtomicAndUint32) \
+ V(PPC_AtomicAndUint64) \
+ V(PPC_AtomicAndInt8) \
+ V(PPC_AtomicAndInt16) \
+ V(PPC_AtomicAndInt32) \
+ V(PPC_AtomicAndInt64) \
+ V(PPC_AtomicOrUint8) \
+ V(PPC_AtomicOrUint16) \
+ V(PPC_AtomicOrUint32) \
+ V(PPC_AtomicOrUint64) \
+ V(PPC_AtomicOrInt8) \
+ V(PPC_AtomicOrInt16) \
+ V(PPC_AtomicOrInt32) \
+ V(PPC_AtomicOrInt64) \
+ V(PPC_AtomicXorUint8) \
+ V(PPC_AtomicXorUint16) \
+ V(PPC_AtomicXorUint32) \
+ V(PPC_AtomicXorUint64) \
+ V(PPC_AtomicXorInt8) \
+ V(PPC_AtomicXorInt16) \
+ V(PPC_AtomicXorInt32) \
+ V(PPC_AtomicXorInt64) \
+ V(PPC_F64x2Splat) \
+ V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
+ V(PPC_F64x2Abs) \
+ V(PPC_F64x2Neg) \
+ V(PPC_F64x2Sqrt) \
+ V(PPC_F64x2Qfma) \
+ V(PPC_F64x2Qfms) \
+ V(PPC_F64x2Div) \
+ V(PPC_F64x2Min) \
+ V(PPC_F64x2Max) \
+ V(PPC_F64x2Ceil) \
+ V(PPC_F64x2Floor) \
+ V(PPC_F64x2Trunc) \
+ V(PPC_F64x2Pmin) \
+ V(PPC_F64x2Pmax) \
+ V(PPC_F64x2ConvertLowI32x4S) \
+ V(PPC_F64x2ConvertLowI32x4U) \
+ V(PPC_F64x2PromoteLowF32x4) \
+ V(PPC_F32x4Splat) \
+ V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
+ V(PPC_F32x4Abs) \
+ V(PPC_F32x4Neg) \
+ V(PPC_F32x4RecipApprox) \
+ V(PPC_F32x4RecipSqrtApprox) \
+ V(PPC_F32x4Sqrt) \
+ V(PPC_F32x4SConvertI32x4) \
+ V(PPC_F32x4UConvertI32x4) \
+ V(PPC_F32x4Div) \
+ V(PPC_F32x4Min) \
+ V(PPC_F32x4Max) \
+ V(PPC_F32x4Ceil) \
+ V(PPC_F32x4Floor) \
+ V(PPC_F32x4Trunc) \
+ V(PPC_F32x4Pmin) \
+ V(PPC_F32x4Pmax) \
+ V(PPC_F32x4Qfma) \
+ V(PPC_F32x4Qfms) \
+ V(PPC_F32x4DemoteF64x2Zero) \
+ V(PPC_I64x2Splat) \
+ V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
+ V(PPC_I64x2Neg) \
+ V(PPC_I64x2BitMask) \
+ V(PPC_I64x2SConvertI32x4Low) \
+ V(PPC_I64x2SConvertI32x4High) \
+ V(PPC_I64x2UConvertI32x4Low) \
+ V(PPC_I64x2UConvertI32x4High) \
+ V(PPC_I64x2ExtMulLowI32x4S) \
+ V(PPC_I64x2ExtMulHighI32x4S) \
+ V(PPC_I64x2ExtMulLowI32x4U) \
+ V(PPC_I64x2ExtMulHighI32x4U) \
+ V(PPC_I64x2Abs) \
+ V(PPC_I32x4Splat) \
+ V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
+ V(PPC_I32x4Neg) \
+ V(PPC_I32x4Abs) \
+ V(PPC_I32x4SConvertF32x4) \
+ V(PPC_I32x4UConvertF32x4) \
+ V(PPC_I32x4SConvertI16x8Low) \
+ V(PPC_I32x4SConvertI16x8High) \
+ V(PPC_I32x4UConvertI16x8Low) \
+ V(PPC_I32x4UConvertI16x8High) \
+ V(PPC_I32x4BitMask) \
+ V(PPC_I32x4DotI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8U) \
+ V(PPC_I32x4ExtMulLowI16x8S) \
+ V(PPC_I32x4ExtMulHighI16x8S) \
+ V(PPC_I32x4ExtMulLowI16x8U) \
+ V(PPC_I32x4ExtMulHighI16x8U) \
+ V(PPC_I32x4TruncSatF64x2SZero) \
+ V(PPC_I32x4TruncSatF64x2UZero) \
+ V(PPC_I16x8Splat) \
+ V(PPC_I16x8ExtractLaneU) \
+ V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
+ V(PPC_I16x8Neg) \
+ V(PPC_I16x8Abs) \
+ V(PPC_I16x8SConvertI32x4) \
+ V(PPC_I16x8UConvertI32x4) \
+ V(PPC_I16x8SConvertI8x16Low) \
+ V(PPC_I16x8SConvertI8x16High) \
+ V(PPC_I16x8UConvertI8x16Low) \
+ V(PPC_I16x8UConvertI8x16High) \
+ V(PPC_I16x8AddSatS) \
+ V(PPC_I16x8SubSatS) \
+ V(PPC_I16x8AddSatU) \
+ V(PPC_I16x8SubSatU) \
+ V(PPC_I16x8RoundingAverageU) \
+ V(PPC_I16x8BitMask) \
+ V(PPC_I16x8ExtAddPairwiseI8x16S) \
+ V(PPC_I16x8ExtAddPairwiseI8x16U) \
+ V(PPC_I16x8Q15MulRSatS) \
+ V(PPC_I16x8ExtMulLowI8x16S) \
+ V(PPC_I16x8ExtMulHighI8x16S) \
+ V(PPC_I16x8ExtMulLowI8x16U) \
+ V(PPC_I16x8ExtMulHighI8x16U) \
+ V(PPC_I8x16Splat) \
+ V(PPC_I8x16ExtractLaneU) \
+ V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_I8x16Neg) \
+ V(PPC_I8x16Abs) \
+ V(PPC_I8x16SConvertI16x8) \
+ V(PPC_I8x16UConvertI16x8) \
+ V(PPC_I8x16AddSatS) \
+ V(PPC_I8x16SubSatS) \
+ V(PPC_I8x16AddSatU) \
+ V(PPC_I8x16SubSatU) \
+ V(PPC_I8x16RoundingAverageU) \
+ V(PPC_I8x16Shuffle) \
+ V(PPC_I8x16Swizzle) \
+ V(PPC_I8x16BitMask) \
+ V(PPC_I8x16Popcnt) \
+ V(PPC_I64x2AllTrue) \
+ V(PPC_I32x4AllTrue) \
+ V(PPC_I16x8AllTrue) \
+ V(PPC_I8x16AllTrue) \
+ V(PPC_V128AnyTrue) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Const) \
+ V(PPC_S128Zero) \
+ V(PPC_S128AllOnes) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
+ V(PPC_S128AndNot) \
+ V(PPC_S128Load8Splat) \
+ V(PPC_S128Load16Splat) \
+ V(PPC_S128Load32Splat) \
+ V(PPC_S128Load64Splat) \
+ V(PPC_S128Load8x8S) \
+ V(PPC_S128Load8x8U) \
+ V(PPC_S128Load16x4S) \
+ V(PPC_S128Load16x4U) \
+ V(PPC_S128Load32x2S) \
+ V(PPC_S128Load32x2U) \
+ V(PPC_S128Load32Zero) \
+ V(PPC_S128Load64Zero) \
+ V(PPC_S128Load8Lane) \
+ V(PPC_S128Load16Lane) \
+ V(PPC_S128Load32Lane) \
+ V(PPC_S128Load64Lane) \
+ V(PPC_S128Store8Lane) \
+ V(PPC_S128Store16Lane) \
+ V(PPC_S128Store32Lane) \
+ V(PPC_S128Store64Lane) \
+ V(PPC_StoreCompressTagged) \
+ V(PPC_LoadDecompressTaggedSigned) \
+ V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
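
The reindented opcode list above also introduces TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST, an empty sub-list spliced into the main list, and drops the per-port atomic load/store and PPC_Compress* opcodes in favor of the generic codes used elsewhere in this patch. A minimal sketch of that X-macro composition, with demo-only names:

// An empty sub-list composed into the main list, so callers can still iterate
// "opcodes with a MemoryAccessMode" separately. All names here are demo-only.
#define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)  // none on this port

#define DEMO_OPCODE_LIST(V)                    \
  DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V)  \
  V(Demo_Add)                                  \
  V(Demo_Load)

enum DemoOpcode {
#define DECLARE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
      kDemoOpcodeCount
};

static_assert(kDemoOpcodeCount == 2, "two opcodes declared through the list");
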
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index aeb1377879e..0270dc401eb 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -112,9 +112,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastDoubleToInt64:
case kPPC_ByteRev32:
case kPPC_ByteRev64:
- case kPPC_CompressSigned:
- case kPPC_CompressPointer:
- case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
case kPPC_F64x2ReplaceLane:
@@ -332,10 +329,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
case kPPC_LoadSimd128:
- case kPPC_AtomicLoadUint8:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
@@ -378,10 +371,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Store64Lane:
return kHasSideEffect;
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
case kPPC_AtomicExchangeUint8:
case kPPC_AtomicExchangeUint16:
case kPPC_AtomicExchangeWord32:
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index c74211aa389..28f071ec681 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -162,14 +162,14 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
+static void VisitLoadCommon(InstructionSelector* selector, Node* node,
+ LoadRepresentation load_rep) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
InstructionCode opcode = kArchNop;
@@ -229,54 +229,51 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad &&
- poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
-
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(offset), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset),
+ g.UseImmediate(base), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseRegister(offset), g.UseImmediate(is_atomic));
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- PPCOperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
+ // TODO(miladfarca): maybe use atomic_order?
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
node->opcode() == IrOpcode::kWord64AtomicStore);
- MachineRepresentation rep;
+ MachineRepresentation rep = store_rep.representation();
WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
- if (is_atomic) {
- rep = AtomicStoreRepresentationOf(node->op());
- } else {
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ if (!is_atomic) {
write_barrier_kind = store_rep.write_barrier_kind();
- rep = store_rep.representation();
}
if (FLAG_enable_unconditional_write_barriers &&
@@ -312,7 +309,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
CHECK_EQ(is_atomic, false);
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
@@ -346,7 +343,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -374,21 +370,26 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
}
}
}
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1956,16 +1957,28 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
Emit(kPPC_Sync, g.NoOutput());
}
-void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
-void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
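
VisitLoad and VisitStore are refactored above into VisitLoadCommon / VisitStoreCommon helpers that take the selector and the representation explicitly, so the atomic visitors can feed them representations taken from AtomicLoadParameters / AtomicStoreParameters instead of round-tripping through the plain visitors. The pattern, reduced to a sketch with stand-in types rather than the V8 API:

// One shared helper, parameterized on the representation; each entry point
// extracts the representation from the operator it actually has.
struct Rep { int bits; bool atomic; };
struct Node;

Rep PlainLoadRepOf(Node*);   // stand-in for LoadRepresentationOf
Rep AtomicLoadRepOf(Node*);  // stand-in for AtomicLoadParametersOf(...).representation()

void VisitLoadCommon(Node* node, Rep rep) {
  // select opcode and addressing mode from rep, then emit the instruction
}

void VisitLoad(Node* node) { VisitLoadCommon(node, PlainLoadRepOf(node)); }
void VisitWord32AtomicLoad(Node* node) { VisitLoadCommon(node, AtomicLoadRepOf(node)); }
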
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -1991,11 +2004,11 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
@@ -2052,11 +2065,11 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
diff --git a/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 2d92ae1567e..c95299ee1d2 100644
--- a/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -106,7 +106,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates
- break;
}
UNREACHABLE();
}
@@ -307,17 +306,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- RiscvOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -336,7 +324,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -351,7 +339,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
size, bin_instr, representation) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
} else { \
@@ -380,7 +368,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
Label exchange; \
__ sync(); \
__ bind(&exchange); \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
__ Move(i.TempRegister(1), i.InputRegister(2)); \
__ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
@@ -392,7 +380,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label exchange; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -419,7 +407,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&compareExchange); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -438,7 +426,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -453,8 +441,8 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), 0, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
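
The compare-exchange fix above: the loaded field is extracted to bit position 0, so the expected value must be narrowed at position 0 as well before the two are compared; the removed line extracted it at the field's in-word offset instead. A plain-C++ illustration of the invariant for sub-word sizes (zero-extension shown; the macro also handles the signed case):

#include <cstdint>

// Extract `size` bits of `value` starting at bit `pos` (size < 64 here).
uint64_t ExtractBitsDemo(uint64_t value, int pos, int size) {
  return (value >> pos) & ((uint64_t{1} << size) - 1);
}

bool FieldMatchesExpected(uint64_t word, int pos, int size, uint64_t expected) {
  uint64_t field = ExtractBitsDemo(word, pos, size);       // aligned to bit 0
  uint64_t narrowed = ExtractBitsDemo(expected, 0, size);  // must also sit at bit 0
  return field == narrowed;                                // now comparable
}
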
@@ -570,31 +558,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -780,13 +743,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -887,10 +850,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1094,17 +1053,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvPopcnt32: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt32(dst, src);
+ __ Popcnt32(dst, src, kScratchReg);
} break;
case kRiscvPopcnt64: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt64(dst, src);
+ __ Popcnt64(dst, src, kScratchReg);
} break;
case kRiscvShl32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sll32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sll32(i.OutputRegister(), i.InputRegister(0),
@@ -1113,8 +1071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvShr32:
if (instr->InputAt(1)->IsRegister()) {
- __ Srl32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Srl32(i.OutputRegister(), i.InputRegister(0),
@@ -1123,8 +1080,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvSar32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sra32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sra32(i.OutputRegister(), i.InputRegister(0),
@@ -1553,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1586,27 +1536,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLd:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1625,7 +1569,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvULoadFloat: {
- __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvStoreFloat: {
@@ -1645,14 +1589,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0f);
}
- __ UStoreFloat(ft, operand);
+ __ UStoreFloat(ft, operand, kScratchReg);
break;
}
case kRiscvLoadDouble:
__ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kRiscvULoadDouble:
- __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kRiscvStoreDouble: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
@@ -1667,7 +1611,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0);
}
- __ UStoreDouble(ft, i.MemoryOperand());
+ __ UStoreDouble(ft, i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvSync: {
@@ -1723,156 +1667,175 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvByteSwap64: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8, kScratchReg);
break;
}
case kRiscvByteSwap32: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4, kScratchReg);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kRiscvWord64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kRiscvWord64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kRiscvWord64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
- break;
case kRiscvWord64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kRiscvWord64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kRiscvWord64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kRiscvWord64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kRiscvWord64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Add32)
- ATOMIC_BINOP_CASE(Sub, Sub32)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kRiscvWord64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Add64)
- ATOMIC_BINOP_CASE(Sub, Sub64)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Add32, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub32, Sub64)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kRiscvAssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -1905,7 +1868,720 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressAnyTagged(result, operand);
break;
}
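+      // RVV vector loads/stores below address memory through a plain base
+      // register, so a nonzero offset is first folded into kScratchReg.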
+ case kRiscvRvvSt: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+ break;
+ }
+ case kRiscvRvvLd: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register src = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(src, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
+ break;
+ }
+ case kRiscvS128Const: {
+ Simd128Register dst = i.OutputSimd128Register();
+ uint8_t imm[16];
+ *reinterpret_cast<uint64_t*>(imm) =
+ make_uint64(i.InputUint32(1), i.InputUint32(0));
+ *(reinterpret_cast<uint64_t*>(imm) + 1) =
+ make_uint64(i.InputUint32(3), i.InputUint32(2));
+ __ WasmRvvS128const(dst, imm);
+ break;
+ }
+ case kRiscvI64x2Add: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Add: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Add: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Add: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI64x2Sub: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Sub: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Sub: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Sub: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128And: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Or: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Xor: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Not: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvS128AndNot: {
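+      // dst = src0 & ~src1: invert src1 into dst, then AND it with src0.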
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvI32x4ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Splat: {
+ (__ VU).set(kScratchReg, E8, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI16x8Splat: {
+ (__ VU).set(kScratchReg, E16, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvF32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvF64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvI32x4Abs: {
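+      // Copy the input, then for lanes that compare below zero overwrite the
+      // copy with 0 - lane under the mask in v0.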
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vsub_vv(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0), Mask);
+ break;
+ }
+ case kRiscvI8x16Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Shl: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
+ break;
+ }
+ case kRiscvI16x8Shl: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
+ break;
+ }
+ case kRiscvI32x4Shl: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
+ break;
+ }
+ case kRiscvI64x2Shl: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_int5(i.InputInt6(1))) {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ } else {
+ __ li(kScratchReg, i.InputInt6(1));
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
+ break;
+ }
+ case kRiscvI8x16ReplaceLane: {
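+      // Build a one-hot mask for the selected lane in v0, then merge the
+      // replacement value into that lane only.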
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E16, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI8x16BitMask: {
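+      // Set a mask bit for every negative lane, then read the packed mask
+      // bits out of element 0 of the scratch vector.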
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvV128AnyTrue: {
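+      // Unsigned max-reduction over all lanes: the result is 1 if any lane is
+      // non-zero and 0 otherwise.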
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label t;
+ __ vmv_sx(kSimd128ScratchReg, zero_reg);
+ __ vredmaxu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beq(dst, zero_reg, &t);
+ __ li(dst, 1);
+ __ bind(&t);
+ break;
+ }
+ case kRiscvI64x2AllTrue: {
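+      // Unsigned min-reduction seeded with all ones: the result is 1 only if
+      // no lane is zero. The narrower AllTrue cases below use the same pattern.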
+ __ VU.set(kScratchReg, E64, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI32x4AllTrue: {
+ __ VU.set(kScratchReg, E32, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI16x8AllTrue: {
+ __ VU.set(kScratchReg, E16, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16AllTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16Shuffle: {
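+      // Assemble the 16 byte shuffle indices from the two 64-bit immediates,
+      // gather the in-range bytes from src0, then gather from src1 with the
+      // indices shifted down by 16 and OR the two results together.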
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ li(kScratchReg, 1);
+ __ vmv_vx(v0, kScratchReg);
+ __ li(kScratchReg, imm1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ __ li(kScratchReg, imm2);
+ __ vsll_vi(v0, v0, 1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ __ VU.set(kScratchReg, E8, m1);
+ if (dst == src0) {
+ __ vmv_vv(kSimd128ScratchReg2, src0);
+ src0 = kSimd128ScratchReg2;
+ } else if (dst == src1) {
+ __ vmv_vv(kSimd128ScratchReg2, src1);
+ src1 = kSimd128ScratchReg2;
+ }
+ __ vrgather_vv(dst, src0, kSimd128ScratchReg);
+ __ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ __ vrgather_vv(kSimd128ScratchReg, src1, kSimd128ScratchReg);
+ __ vor_vv(dst, dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Abs: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Abs: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Neg: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Neg: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4DemoteF64x2Zero: {
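+      // Narrow the two doubles to floats, then zero the upper two lanes via
+      // the 0b1100 merge mask.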
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmv_vi(v0, 12);
+ __ vmerge_vx(i.OutputSimd128Register(), zero_reg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Add: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Sub: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Add: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Sub: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Ceil: {
+ __ Ceil_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Ceil: {
+ __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Floor: {
+ __ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Floor: {
+ __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvS128Select: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
+ __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
+ kSimd128ScratchReg2);
+ __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
+ kSimd128ScratchReg2);
+ break;
+ }
+ case kRiscvF32x4UConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4SConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Div: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Mul: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Eq: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Ne: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Lt: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Le: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfle_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Max: {
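+      // NaN-propagating max: v0 masks lanes where both inputs are ordered;
+      // only those lanes take vfmax, the rest keep the canonical quiet NaN.
+      // F32x4Min below uses the same masking scheme.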
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Min: {
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
default:
+#ifdef DEBUG
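+      // Print the name of the unhandled opcode before aborting in debug builds.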
+ switch (arch_opcode) {
+#define Print(name) \
+ case k##name: \
+ printf("k%s", #name); \
+ break;
+ TARGET_ARCH_OPCODE_LIST(Print);
+#undef Print
+ default:
+ break;
+ }
+#endif
UNIMPLEMENTED();
}
return kSuccess;
@@ -1916,6 +2592,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
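+// Returns true for conditions that are satisfied when both operands compare
+// equal, i.e. conditions that a comparison of zero against zero also satisfies.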
+bool IsIncludeEqual(Condition cc) {
+ switch (cc) {
+ case equal:
+ case greater_equal:
+ case less_equal:
+ case Uless_equal:
+ case Ugreater_equal:
+ return true;
+ default:
+ return false;
+ }
+}
+
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -1952,7 +2641,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvMulOvf32) {
// Overflow occurs if overflow register is not zero
@@ -1965,14 +2653,17 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kRiscvMulOvf32, condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kRiscvCmpZero) {
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+    if (i.InputOrZeroRegister(0) == zero_reg && IsIncludeEqual(cc)) {
+ __ Branch(tlabel);
+ } else if (i.InputOrZeroRegister(0) != zero_reg) {
+ __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+ }
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
Register lhs_register = sp;
@@ -2011,110 +2702,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- RiscvOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kRiscvCmp: {
- __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvCmpZero: {
- __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kRiscvAdd64:
- case kRiscvSub64: {
- // Check for overflow creates 1 or 0 for result.
- __ Srl64(kScratchReg, i.OutputRegister(), 63);
- __ Srl32(kScratchReg2, i.OutputRegister(), 31);
- __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvAddOvf64:
- case kRiscvSubOvf64: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvMulOvf32: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvCmpS:
- case kRiscvCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- } else {
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -2489,7 +3076,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2652,7 +3238,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
- ZoneDeque<DeoptimizationExit*>* exits) {}
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ __ ForceConstantPoolEmissionWithoutJump();
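+  // Sum the fixed sizes of all deoptimization exits so the trampoline pool
+  // cannot be emitted in the middle of the exit sequence.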
+ int total_size = 0;
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ total_size += (exit->kind() == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize;
+ }
+
+ __ CheckTrampolinePoolQuick(total_size);
+ DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2735,7 +3332,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
@@ -2765,7 +3361,21 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
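+      // Register-to-register Simd128 moves use vmv.v.v; moves to a stack slot
+      // compute the slot address (via kScratchReg if it has an offset) and use
+      // a vector store.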
+ VRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ VRegister dst = g.ToSimd128Register(destination);
+ __ vmv_vv(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vs(src, dst, 0, E8);
+ }
} else {
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
@@ -2786,7 +3396,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
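+      // Simd128 moves from a stack slot compute the source address (via
+      // kScratchReg if it has an offset); slot-to-slot moves bounce through
+      // kSimd128ScratchReg.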
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ __ Add64(src_reg, src.rm(), src.offset());
+ }
+ if (destination->IsSimd128Register()) {
+ __ vl(g.ToSimd128Register(destination), src_reg, 0, E8);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ VRegister temp = kSimd128ScratchReg;
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vl(temp, src_reg, 0, E8);
+ __ vs(temp, dst, 0, E8);
+ }
} else {
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat32) {
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 2f51c2b1c79..f3aa0f29a83 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -9,423 +9,400 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(RiscvAdd32) \
- V(RiscvAdd64) \
- V(RiscvAddOvf64) \
- V(RiscvSub32) \
- V(RiscvSub64) \
- V(RiscvSubOvf64) \
- V(RiscvMul32) \
- V(RiscvMulOvf32) \
- V(RiscvMulHigh32) \
- V(RiscvMulHigh64) \
- V(RiscvMulHighU32) \
- V(RiscvMul64) \
- V(RiscvDiv32) \
- V(RiscvDiv64) \
- V(RiscvDivU32) \
- V(RiscvDivU64) \
- V(RiscvMod32) \
- V(RiscvMod64) \
- V(RiscvModU32) \
- V(RiscvModU64) \
- V(RiscvAnd) \
- V(RiscvAnd32) \
- V(RiscvOr) \
- V(RiscvOr32) \
- V(RiscvNor) \
- V(RiscvNor32) \
- V(RiscvXor) \
- V(RiscvXor32) \
- V(RiscvClz32) \
- V(RiscvShl32) \
- V(RiscvShr32) \
- V(RiscvSar32) \
- V(RiscvZeroExtendWord) \
- V(RiscvSignExtendWord) \
- V(RiscvClz64) \
- V(RiscvCtz32) \
- V(RiscvCtz64) \
- V(RiscvPopcnt32) \
- V(RiscvPopcnt64) \
- V(RiscvShl64) \
- V(RiscvShr64) \
- V(RiscvSar64) \
- V(RiscvRor32) \
- V(RiscvRor64) \
- V(RiscvMov) \
- V(RiscvTst) \
- V(RiscvCmp) \
- V(RiscvCmpZero) \
- V(RiscvCmpS) \
- V(RiscvAddS) \
- V(RiscvSubS) \
- V(RiscvMulS) \
- V(RiscvDivS) \
- V(RiscvModS) \
- V(RiscvAbsS) \
- V(RiscvNegS) \
- V(RiscvSqrtS) \
- V(RiscvMaxS) \
- V(RiscvMinS) \
- V(RiscvCmpD) \
- V(RiscvAddD) \
- V(RiscvSubD) \
- V(RiscvMulD) \
- V(RiscvDivD) \
- V(RiscvModD) \
- V(RiscvAbsD) \
- V(RiscvNegD) \
- V(RiscvSqrtD) \
- V(RiscvMaxD) \
- V(RiscvMinD) \
- V(RiscvFloat64RoundDown) \
- V(RiscvFloat64RoundTruncate) \
- V(RiscvFloat64RoundUp) \
- V(RiscvFloat64RoundTiesEven) \
- V(RiscvFloat32RoundDown) \
- V(RiscvFloat32RoundTruncate) \
- V(RiscvFloat32RoundUp) \
- V(RiscvFloat32RoundTiesEven) \
- V(RiscvCvtSD) \
- V(RiscvCvtDS) \
- V(RiscvTruncWD) \
- V(RiscvRoundWD) \
- V(RiscvFloorWD) \
- V(RiscvCeilWD) \
- V(RiscvTruncWS) \
- V(RiscvRoundWS) \
- V(RiscvFloorWS) \
- V(RiscvCeilWS) \
- V(RiscvTruncLS) \
- V(RiscvTruncLD) \
- V(RiscvTruncUwD) \
- V(RiscvTruncUwS) \
- V(RiscvTruncUlS) \
- V(RiscvTruncUlD) \
- V(RiscvCvtDW) \
- V(RiscvCvtSL) \
- V(RiscvCvtSW) \
- V(RiscvCvtSUw) \
- V(RiscvCvtSUl) \
- V(RiscvCvtDL) \
- V(RiscvCvtDUw) \
- V(RiscvCvtDUl) \
- V(RiscvLb) \
- V(RiscvLbu) \
- V(RiscvSb) \
- V(RiscvLh) \
- V(RiscvUlh) \
- V(RiscvLhu) \
- V(RiscvUlhu) \
- V(RiscvSh) \
- V(RiscvUsh) \
- V(RiscvLd) \
- V(RiscvUld) \
- V(RiscvLw) \
- V(RiscvUlw) \
- V(RiscvLwu) \
- V(RiscvUlwu) \
- V(RiscvSw) \
- V(RiscvUsw) \
- V(RiscvSd) \
- V(RiscvUsd) \
- V(RiscvLoadFloat) \
- V(RiscvULoadFloat) \
- V(RiscvStoreFloat) \
- V(RiscvUStoreFloat) \
- V(RiscvLoadDouble) \
- V(RiscvULoadDouble) \
- V(RiscvStoreDouble) \
- V(RiscvUStoreDouble) \
- V(RiscvBitcastDL) \
- V(RiscvBitcastLD) \
- V(RiscvBitcastInt32ToFloat32) \
- V(RiscvBitcastFloat32ToInt32) \
- V(RiscvFloat64ExtractLowWord32) \
- V(RiscvFloat64ExtractHighWord32) \
- V(RiscvFloat64InsertLowWord32) \
- V(RiscvFloat64InsertHighWord32) \
- V(RiscvFloat32Max) \
- V(RiscvFloat64Max) \
- V(RiscvFloat32Min) \
- V(RiscvFloat64Min) \
- V(RiscvFloat64SilenceNaN) \
- V(RiscvPush) \
- V(RiscvPeek) \
- V(RiscvByteSwap64) \
- V(RiscvByteSwap32) \
- V(RiscvStoreToStackSlot) \
- V(RiscvStackClaim) \
- V(RiscvSignExtendByte) \
- V(RiscvSignExtendShort) \
- V(RiscvSync) \
- V(RiscvAssertEqual) \
- V(RiscvS128Const) \
- V(RiscvS128Zero) \
- V(RiscvS128AllOnes) \
- V(RiscvI32x4Splat) \
- V(RiscvI32x4ExtractLane) \
- V(RiscvI32x4ReplaceLane) \
- V(RiscvI32x4Add) \
- V(RiscvI32x4Sub) \
- V(RiscvF64x2Abs) \
- V(RiscvF64x2Neg) \
- V(RiscvF32x4Splat) \
- V(RiscvF32x4ExtractLane) \
- V(RiscvF32x4ReplaceLane) \
- V(RiscvF32x4SConvertI32x4) \
- V(RiscvF32x4UConvertI32x4) \
- V(RiscvI64x2SConvertI32x4Low) \
- V(RiscvI64x2SConvertI32x4High) \
- V(RiscvI64x2UConvertI32x4Low) \
- V(RiscvI64x2UConvertI32x4High) \
- V(RiscvI32x4Mul) \
- V(RiscvI32x4MaxS) \
- V(RiscvI32x4MinS) \
- V(RiscvI32x4Eq) \
- V(RiscvI32x4Ne) \
- V(RiscvI32x4Shl) \
- V(RiscvI32x4ShrS) \
- V(RiscvI32x4ShrU) \
- V(RiscvI32x4MaxU) \
- V(RiscvI32x4MinU) \
- V(RiscvI64x2GtS) \
- V(RiscvI64x2GeS) \
- V(RiscvI64x2Eq) \
- V(RiscvI64x2Ne) \
- V(RiscvF64x2Sqrt) \
- V(RiscvF64x2Add) \
- V(RiscvF64x2Sub) \
- V(RiscvF64x2Mul) \
- V(RiscvF64x2Div) \
- V(RiscvF64x2Min) \
- V(RiscvF64x2Max) \
- V(RiscvF64x2ConvertLowI32x4S) \
- V(RiscvF64x2ConvertLowI32x4U) \
- V(RiscvF64x2PromoteLowF32x4) \
- V(RiscvF64x2Eq) \
- V(RiscvF64x2Ne) \
- V(RiscvF64x2Lt) \
- V(RiscvF64x2Le) \
- V(RiscvF64x2Splat) \
- V(RiscvF64x2ExtractLane) \
- V(RiscvF64x2ReplaceLane) \
- V(RiscvF64x2Pmin) \
- V(RiscvF64x2Pmax) \
- V(RiscvF64x2Ceil) \
- V(RiscvF64x2Floor) \
- V(RiscvF64x2Trunc) \
- V(RiscvF64x2NearestInt) \
- V(RiscvI64x2Splat) \
- V(RiscvI64x2ExtractLane) \
- V(RiscvI64x2ReplaceLane) \
- V(RiscvI64x2Add) \
- V(RiscvI64x2Sub) \
- V(RiscvI64x2Mul) \
- V(RiscvI64x2Abs) \
- V(RiscvI64x2Neg) \
- V(RiscvI64x2Shl) \
- V(RiscvI64x2ShrS) \
- V(RiscvI64x2ShrU) \
- V(RiscvI64x2BitMask) \
- V(RiscvF32x4Abs) \
- V(RiscvF32x4Neg) \
- V(RiscvF32x4Sqrt) \
- V(RiscvF32x4RecipApprox) \
- V(RiscvF32x4RecipSqrtApprox) \
- V(RiscvF32x4Add) \
- V(RiscvF32x4Sub) \
- V(RiscvF32x4Mul) \
- V(RiscvF32x4Div) \
- V(RiscvF32x4Max) \
- V(RiscvF32x4Min) \
- V(RiscvF32x4Eq) \
- V(RiscvF32x4Ne) \
- V(RiscvF32x4Lt) \
- V(RiscvF32x4Le) \
- V(RiscvF32x4Pmin) \
- V(RiscvF32x4Pmax) \
- V(RiscvF32x4DemoteF64x2Zero) \
- V(RiscvF32x4Ceil) \
- V(RiscvF32x4Floor) \
- V(RiscvF32x4Trunc) \
- V(RiscvF32x4NearestInt) \
- V(RiscvI32x4SConvertF32x4) \
- V(RiscvI32x4UConvertF32x4) \
- V(RiscvI32x4Neg) \
- V(RiscvI32x4GtS) \
- V(RiscvI32x4GeS) \
- V(RiscvI32x4GtU) \
- V(RiscvI32x4GeU) \
- V(RiscvI32x4Abs) \
- V(RiscvI32x4BitMask) \
- V(RiscvI32x4DotI16x8S) \
- V(RiscvI32x4TruncSatF64x2SZero) \
- V(RiscvI32x4TruncSatF64x2UZero) \
- V(RiscvI16x8Splat) \
- V(RiscvI16x8ExtractLaneU) \
- V(RiscvI16x8ExtractLaneS) \
- V(RiscvI16x8ReplaceLane) \
- V(RiscvI16x8Neg) \
- V(RiscvI16x8Shl) \
- V(RiscvI16x8ShrS) \
- V(RiscvI16x8ShrU) \
- V(RiscvI16x8Add) \
- V(RiscvI16x8AddSatS) \
- V(RiscvI16x8Sub) \
- V(RiscvI16x8SubSatS) \
- V(RiscvI16x8Mul) \
- V(RiscvI16x8MaxS) \
- V(RiscvI16x8MinS) \
- V(RiscvI16x8Eq) \
- V(RiscvI16x8Ne) \
- V(RiscvI16x8GtS) \
- V(RiscvI16x8GeS) \
- V(RiscvI16x8AddSatU) \
- V(RiscvI16x8SubSatU) \
- V(RiscvI16x8MaxU) \
- V(RiscvI16x8MinU) \
- V(RiscvI16x8GtU) \
- V(RiscvI16x8GeU) \
- V(RiscvI16x8RoundingAverageU) \
- V(RiscvI16x8Q15MulRSatS) \
- V(RiscvI16x8Abs) \
- V(RiscvI16x8BitMask) \
- V(RiscvI8x16Splat) \
- V(RiscvI8x16ExtractLaneU) \
- V(RiscvI8x16ExtractLaneS) \
- V(RiscvI8x16ReplaceLane) \
- V(RiscvI8x16Neg) \
- V(RiscvI8x16Shl) \
- V(RiscvI8x16ShrS) \
- V(RiscvI8x16Add) \
- V(RiscvI8x16AddSatS) \
- V(RiscvI8x16Sub) \
- V(RiscvI8x16SubSatS) \
- V(RiscvI8x16MaxS) \
- V(RiscvI8x16MinS) \
- V(RiscvI8x16Eq) \
- V(RiscvI8x16Ne) \
- V(RiscvI8x16GtS) \
- V(RiscvI8x16GeS) \
- V(RiscvI8x16ShrU) \
- V(RiscvI8x16AddSatU) \
- V(RiscvI8x16SubSatU) \
- V(RiscvI8x16MaxU) \
- V(RiscvI8x16MinU) \
- V(RiscvI8x16GtU) \
- V(RiscvI8x16GeU) \
- V(RiscvI8x16RoundingAverageU) \
- V(RiscvI8x16Abs) \
- V(RiscvI8x16BitMask) \
- V(RiscvI8x16Popcnt) \
- V(RiscvS128And) \
- V(RiscvS128Or) \
- V(RiscvS128Xor) \
- V(RiscvS128Not) \
- V(RiscvS128Select) \
- V(RiscvS128AndNot) \
- V(RiscvI32x4AllTrue) \
- V(RiscvI16x8AllTrue) \
- V(RiscvV128AnyTrue) \
- V(RiscvI8x16AllTrue) \
- V(RiscvI64x2AllTrue) \
- V(RiscvS32x4InterleaveRight) \
- V(RiscvS32x4InterleaveLeft) \
- V(RiscvS32x4PackEven) \
- V(RiscvS32x4PackOdd) \
- V(RiscvS32x4InterleaveEven) \
- V(RiscvS32x4InterleaveOdd) \
- V(RiscvS32x4Shuffle) \
- V(RiscvS16x8InterleaveRight) \
- V(RiscvS16x8InterleaveLeft) \
- V(RiscvS16x8PackEven) \
- V(RiscvS16x8PackOdd) \
- V(RiscvS16x8InterleaveEven) \
- V(RiscvS16x8InterleaveOdd) \
- V(RiscvS16x4Reverse) \
- V(RiscvS16x2Reverse) \
- V(RiscvS8x16InterleaveRight) \
- V(RiscvS8x16InterleaveLeft) \
- V(RiscvS8x16PackEven) \
- V(RiscvS8x16PackOdd) \
- V(RiscvS8x16InterleaveEven) \
- V(RiscvS8x16InterleaveOdd) \
- V(RiscvS8x16Shuffle) \
- V(RiscvI8x16Swizzle) \
- V(RiscvS8x16Concat) \
- V(RiscvS8x8Reverse) \
- V(RiscvS8x4Reverse) \
- V(RiscvS8x2Reverse) \
- V(RiscvS128Load8Splat) \
- V(RiscvS128Load16Splat) \
- V(RiscvS128Load32Splat) \
- V(RiscvS128Load64Splat) \
- V(RiscvS128Load8x8S) \
- V(RiscvS128Load8x8U) \
- V(RiscvS128Load16x4S) \
- V(RiscvS128Load16x4U) \
- V(RiscvS128Load32x2S) \
- V(RiscvS128Load32x2U) \
- V(RiscvS128LoadLane) \
- V(RiscvS128StoreLane) \
- V(RiscvMsaLd) \
- V(RiscvMsaSt) \
- V(RiscvI32x4SConvertI16x8Low) \
- V(RiscvI32x4SConvertI16x8High) \
- V(RiscvI32x4UConvertI16x8Low) \
- V(RiscvI32x4UConvertI16x8High) \
- V(RiscvI16x8SConvertI8x16Low) \
- V(RiscvI16x8SConvertI8x16High) \
- V(RiscvI16x8SConvertI32x4) \
- V(RiscvI16x8UConvertI32x4) \
- V(RiscvI16x8UConvertI8x16Low) \
- V(RiscvI16x8UConvertI8x16High) \
- V(RiscvI8x16SConvertI16x8) \
- V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint8) \
- V(RiscvWord64AtomicLoadUint16) \
- V(RiscvWord64AtomicLoadUint32) \
- V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord8) \
- V(RiscvWord64AtomicStoreWord16) \
- V(RiscvWord64AtomicStoreWord32) \
- V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint8) \
- V(RiscvWord64AtomicAddUint16) \
- V(RiscvWord64AtomicAddUint32) \
- V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint8) \
- V(RiscvWord64AtomicSubUint16) \
- V(RiscvWord64AtomicSubUint32) \
- V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint8) \
- V(RiscvWord64AtomicAndUint16) \
- V(RiscvWord64AtomicAndUint32) \
- V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint8) \
- V(RiscvWord64AtomicOrUint16) \
- V(RiscvWord64AtomicOrUint32) \
- V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint8) \
- V(RiscvWord64AtomicXorUint16) \
- V(RiscvWord64AtomicXorUint32) \
- V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint8) \
- V(RiscvWord64AtomicExchangeUint16) \
- V(RiscvWord64AtomicExchangeUint32) \
- V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint8) \
- V(RiscvWord64AtomicCompareExchangeUint16) \
- V(RiscvWord64AtomicCompareExchangeUint32) \
- V(RiscvWord64AtomicCompareExchangeUint64) \
- V(RiscvStoreCompressTagged) \
- V(RiscvLoadDecompressTaggedSigned) \
- V(RiscvLoadDecompressTaggedPointer) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(RiscvAdd32) \
+ V(RiscvAdd64) \
+ V(RiscvAddOvf64) \
+ V(RiscvSub32) \
+ V(RiscvSub64) \
+ V(RiscvSubOvf64) \
+ V(RiscvMul32) \
+ V(RiscvMulOvf32) \
+ V(RiscvMulHigh32) \
+ V(RiscvMulHigh64) \
+ V(RiscvMulHighU32) \
+ V(RiscvMul64) \
+ V(RiscvDiv32) \
+ V(RiscvDiv64) \
+ V(RiscvDivU32) \
+ V(RiscvDivU64) \
+ V(RiscvMod32) \
+ V(RiscvMod64) \
+ V(RiscvModU32) \
+ V(RiscvModU64) \
+ V(RiscvAnd) \
+ V(RiscvAnd32) \
+ V(RiscvOr) \
+ V(RiscvOr32) \
+ V(RiscvNor) \
+ V(RiscvNor32) \
+ V(RiscvXor) \
+ V(RiscvXor32) \
+ V(RiscvClz32) \
+ V(RiscvShl32) \
+ V(RiscvShr32) \
+ V(RiscvSar32) \
+ V(RiscvZeroExtendWord) \
+ V(RiscvSignExtendWord) \
+ V(RiscvClz64) \
+ V(RiscvCtz32) \
+ V(RiscvCtz64) \
+ V(RiscvPopcnt32) \
+ V(RiscvPopcnt64) \
+ V(RiscvShl64) \
+ V(RiscvShr64) \
+ V(RiscvSar64) \
+ V(RiscvRor32) \
+ V(RiscvRor64) \
+ V(RiscvMov) \
+ V(RiscvTst) \
+ V(RiscvCmp) \
+ V(RiscvCmpZero) \
+ V(RiscvCmpS) \
+ V(RiscvAddS) \
+ V(RiscvSubS) \
+ V(RiscvMulS) \
+ V(RiscvDivS) \
+ V(RiscvModS) \
+ V(RiscvAbsS) \
+ V(RiscvNegS) \
+ V(RiscvSqrtS) \
+ V(RiscvMaxS) \
+ V(RiscvMinS) \
+ V(RiscvCmpD) \
+ V(RiscvAddD) \
+ V(RiscvSubD) \
+ V(RiscvMulD) \
+ V(RiscvDivD) \
+ V(RiscvModD) \
+ V(RiscvAbsD) \
+ V(RiscvNegD) \
+ V(RiscvSqrtD) \
+ V(RiscvMaxD) \
+ V(RiscvMinD) \
+ V(RiscvFloat64RoundDown) \
+ V(RiscvFloat64RoundTruncate) \
+ V(RiscvFloat64RoundUp) \
+ V(RiscvFloat64RoundTiesEven) \
+ V(RiscvFloat32RoundDown) \
+ V(RiscvFloat32RoundTruncate) \
+ V(RiscvFloat32RoundUp) \
+ V(RiscvFloat32RoundTiesEven) \
+ V(RiscvCvtSD) \
+ V(RiscvCvtDS) \
+ V(RiscvTruncWD) \
+ V(RiscvRoundWD) \
+ V(RiscvFloorWD) \
+ V(RiscvCeilWD) \
+ V(RiscvTruncWS) \
+ V(RiscvRoundWS) \
+ V(RiscvFloorWS) \
+ V(RiscvCeilWS) \
+ V(RiscvTruncLS) \
+ V(RiscvTruncLD) \
+ V(RiscvTruncUwD) \
+ V(RiscvTruncUwS) \
+ V(RiscvTruncUlS) \
+ V(RiscvTruncUlD) \
+ V(RiscvCvtDW) \
+ V(RiscvCvtSL) \
+ V(RiscvCvtSW) \
+ V(RiscvCvtSUw) \
+ V(RiscvCvtSUl) \
+ V(RiscvCvtDL) \
+ V(RiscvCvtDUw) \
+ V(RiscvCvtDUl) \
+ V(RiscvLb) \
+ V(RiscvLbu) \
+ V(RiscvSb) \
+ V(RiscvLh) \
+ V(RiscvUlh) \
+ V(RiscvLhu) \
+ V(RiscvUlhu) \
+ V(RiscvSh) \
+ V(RiscvUsh) \
+ V(RiscvLd) \
+ V(RiscvUld) \
+ V(RiscvLw) \
+ V(RiscvUlw) \
+ V(RiscvLwu) \
+ V(RiscvUlwu) \
+ V(RiscvSw) \
+ V(RiscvUsw) \
+ V(RiscvSd) \
+ V(RiscvUsd) \
+ V(RiscvLoadFloat) \
+ V(RiscvULoadFloat) \
+ V(RiscvStoreFloat) \
+ V(RiscvUStoreFloat) \
+ V(RiscvLoadDouble) \
+ V(RiscvULoadDouble) \
+ V(RiscvStoreDouble) \
+ V(RiscvUStoreDouble) \
+ V(RiscvBitcastDL) \
+ V(RiscvBitcastLD) \
+ V(RiscvBitcastInt32ToFloat32) \
+ V(RiscvBitcastFloat32ToInt32) \
+ V(RiscvFloat64ExtractLowWord32) \
+ V(RiscvFloat64ExtractHighWord32) \
+ V(RiscvFloat64InsertLowWord32) \
+ V(RiscvFloat64InsertHighWord32) \
+ V(RiscvFloat32Max) \
+ V(RiscvFloat64Max) \
+ V(RiscvFloat32Min) \
+ V(RiscvFloat64Min) \
+ V(RiscvFloat64SilenceNaN) \
+ V(RiscvPush) \
+ V(RiscvPeek) \
+ V(RiscvByteSwap64) \
+ V(RiscvByteSwap32) \
+ V(RiscvStoreToStackSlot) \
+ V(RiscvStackClaim) \
+ V(RiscvSignExtendByte) \
+ V(RiscvSignExtendShort) \
+ V(RiscvSync) \
+ V(RiscvAssertEqual) \
+ V(RiscvS128Const) \
+ V(RiscvS128Zero) \
+ V(RiscvS128AllOnes) \
+ V(RiscvI32x4Splat) \
+ V(RiscvI32x4ExtractLane) \
+ V(RiscvI32x4ReplaceLane) \
+ V(RiscvI32x4Add) \
+ V(RiscvI32x4Sub) \
+ V(RiscvF64x2Abs) \
+ V(RiscvF64x2Neg) \
+ V(RiscvF32x4Splat) \
+ V(RiscvF32x4ExtractLane) \
+ V(RiscvF32x4ReplaceLane) \
+ V(RiscvF32x4SConvertI32x4) \
+ V(RiscvF32x4UConvertI32x4) \
+ V(RiscvI64x2SConvertI32x4Low) \
+ V(RiscvI64x2SConvertI32x4High) \
+ V(RiscvI64x2UConvertI32x4Low) \
+ V(RiscvI64x2UConvertI32x4High) \
+ V(RiscvI32x4Mul) \
+ V(RiscvI32x4MaxS) \
+ V(RiscvI32x4MinS) \
+ V(RiscvI32x4Eq) \
+ V(RiscvI32x4Ne) \
+ V(RiscvI32x4Shl) \
+ V(RiscvI32x4ShrS) \
+ V(RiscvI32x4ShrU) \
+ V(RiscvI32x4MaxU) \
+ V(RiscvI32x4MinU) \
+ V(RiscvI64x2GtS) \
+ V(RiscvI64x2GeS) \
+ V(RiscvI64x2Eq) \
+ V(RiscvI64x2Ne) \
+ V(RiscvF64x2Sqrt) \
+ V(RiscvF64x2Add) \
+ V(RiscvF64x2Sub) \
+ V(RiscvF64x2Mul) \
+ V(RiscvF64x2Div) \
+ V(RiscvF64x2Min) \
+ V(RiscvF64x2Max) \
+ V(RiscvF64x2ConvertLowI32x4S) \
+ V(RiscvF64x2ConvertLowI32x4U) \
+ V(RiscvF64x2PromoteLowF32x4) \
+ V(RiscvF64x2Eq) \
+ V(RiscvF64x2Ne) \
+ V(RiscvF64x2Lt) \
+ V(RiscvF64x2Le) \
+ V(RiscvF64x2Splat) \
+ V(RiscvF64x2ExtractLane) \
+ V(RiscvF64x2ReplaceLane) \
+ V(RiscvF64x2Pmin) \
+ V(RiscvF64x2Pmax) \
+ V(RiscvF64x2Ceil) \
+ V(RiscvF64x2Floor) \
+ V(RiscvF64x2Trunc) \
+ V(RiscvF64x2NearestInt) \
+ V(RiscvI64x2Splat) \
+ V(RiscvI64x2ExtractLane) \
+ V(RiscvI64x2ReplaceLane) \
+ V(RiscvI64x2Add) \
+ V(RiscvI64x2Sub) \
+ V(RiscvI64x2Mul) \
+ V(RiscvI64x2Abs) \
+ V(RiscvI64x2Neg) \
+ V(RiscvI64x2Shl) \
+ V(RiscvI64x2ShrS) \
+ V(RiscvI64x2ShrU) \
+ V(RiscvI64x2BitMask) \
+ V(RiscvF32x4Abs) \
+ V(RiscvF32x4Neg) \
+ V(RiscvF32x4Sqrt) \
+ V(RiscvF32x4RecipApprox) \
+ V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Add) \
+ V(RiscvF32x4Sub) \
+ V(RiscvF32x4Mul) \
+ V(RiscvF32x4Div) \
+ V(RiscvF32x4Max) \
+ V(RiscvF32x4Min) \
+ V(RiscvF32x4Eq) \
+ V(RiscvF32x4Ne) \
+ V(RiscvF32x4Lt) \
+ V(RiscvF32x4Le) \
+ V(RiscvF32x4Pmin) \
+ V(RiscvF32x4Pmax) \
+ V(RiscvF32x4DemoteF64x2Zero) \
+ V(RiscvF32x4Ceil) \
+ V(RiscvF32x4Floor) \
+ V(RiscvF32x4Trunc) \
+ V(RiscvF32x4NearestInt) \
+ V(RiscvI32x4SConvertF32x4) \
+ V(RiscvI32x4UConvertF32x4) \
+ V(RiscvI32x4Neg) \
+ V(RiscvI32x4GtS) \
+ V(RiscvI32x4GeS) \
+ V(RiscvI32x4GtU) \
+ V(RiscvI32x4GeU) \
+ V(RiscvI32x4Abs) \
+ V(RiscvI32x4BitMask) \
+ V(RiscvI32x4DotI16x8S) \
+ V(RiscvI32x4TruncSatF64x2SZero) \
+ V(RiscvI32x4TruncSatF64x2UZero) \
+ V(RiscvI16x8Splat) \
+ V(RiscvI16x8ExtractLaneU) \
+ V(RiscvI16x8ExtractLaneS) \
+ V(RiscvI16x8ReplaceLane) \
+ V(RiscvI16x8Neg) \
+ V(RiscvI16x8Shl) \
+ V(RiscvI16x8ShrS) \
+ V(RiscvI16x8ShrU) \
+ V(RiscvI16x8Add) \
+ V(RiscvI16x8AddSatS) \
+ V(RiscvI16x8Sub) \
+ V(RiscvI16x8SubSatS) \
+ V(RiscvI16x8Mul) \
+ V(RiscvI16x8MaxS) \
+ V(RiscvI16x8MinS) \
+ V(RiscvI16x8Eq) \
+ V(RiscvI16x8Ne) \
+ V(RiscvI16x8GtS) \
+ V(RiscvI16x8GeS) \
+ V(RiscvI16x8AddSatU) \
+ V(RiscvI16x8SubSatU) \
+ V(RiscvI16x8MaxU) \
+ V(RiscvI16x8MinU) \
+ V(RiscvI16x8GtU) \
+ V(RiscvI16x8GeU) \
+ V(RiscvI16x8RoundingAverageU) \
+ V(RiscvI16x8Q15MulRSatS) \
+ V(RiscvI16x8Abs) \
+ V(RiscvI16x8BitMask) \
+ V(RiscvI8x16Splat) \
+ V(RiscvI8x16ExtractLaneU) \
+ V(RiscvI8x16ExtractLaneS) \
+ V(RiscvI8x16ReplaceLane) \
+ V(RiscvI8x16Neg) \
+ V(RiscvI8x16Shl) \
+ V(RiscvI8x16ShrS) \
+ V(RiscvI8x16Add) \
+ V(RiscvI8x16AddSatS) \
+ V(RiscvI8x16Sub) \
+ V(RiscvI8x16SubSatS) \
+ V(RiscvI8x16MaxS) \
+ V(RiscvI8x16MinS) \
+ V(RiscvI8x16Eq) \
+ V(RiscvI8x16Ne) \
+ V(RiscvI8x16GtS) \
+ V(RiscvI8x16GeS) \
+ V(RiscvI8x16ShrU) \
+ V(RiscvI8x16AddSatU) \
+ V(RiscvI8x16SubSatU) \
+ V(RiscvI8x16MaxU) \
+ V(RiscvI8x16MinU) \
+ V(RiscvI8x16GtU) \
+ V(RiscvI8x16GeU) \
+ V(RiscvI8x16RoundingAverageU) \
+ V(RiscvI8x16Abs) \
+ V(RiscvI8x16BitMask) \
+ V(RiscvI8x16Popcnt) \
+ V(RiscvS128And) \
+ V(RiscvS128Or) \
+ V(RiscvS128Xor) \
+ V(RiscvS128Not) \
+ V(RiscvS128Select) \
+ V(RiscvS128AndNot) \
+ V(RiscvI32x4AllTrue) \
+ V(RiscvI16x8AllTrue) \
+ V(RiscvV128AnyTrue) \
+ V(RiscvI8x16AllTrue) \
+ V(RiscvI64x2AllTrue) \
+ V(RiscvS32x4InterleaveRight) \
+ V(RiscvS32x4InterleaveLeft) \
+ V(RiscvS32x4PackEven) \
+ V(RiscvS32x4PackOdd) \
+ V(RiscvS32x4InterleaveEven) \
+ V(RiscvS32x4InterleaveOdd) \
+ V(RiscvS32x4Shuffle) \
+ V(RiscvS16x8InterleaveRight) \
+ V(RiscvS16x8InterleaveLeft) \
+ V(RiscvS16x8PackEven) \
+ V(RiscvS16x8PackOdd) \
+ V(RiscvS16x8InterleaveEven) \
+ V(RiscvS16x8InterleaveOdd) \
+ V(RiscvS16x4Reverse) \
+ V(RiscvS16x2Reverse) \
+ V(RiscvS8x16InterleaveRight) \
+ V(RiscvS8x16InterleaveLeft) \
+ V(RiscvS8x16PackEven) \
+ V(RiscvS8x16PackOdd) \
+ V(RiscvS8x16InterleaveEven) \
+ V(RiscvS8x16InterleaveOdd) \
+ V(RiscvI8x16Shuffle) \
+ V(RiscvI8x16Swizzle) \
+ V(RiscvS8x16Concat) \
+ V(RiscvS8x8Reverse) \
+ V(RiscvS8x4Reverse) \
+ V(RiscvS8x2Reverse) \
+ V(RiscvS128Load8Splat) \
+ V(RiscvS128Load16Splat) \
+ V(RiscvS128Load32Splat) \
+ V(RiscvS128Load64Splat) \
+ V(RiscvS128Load8x8S) \
+ V(RiscvS128Load8x8U) \
+ V(RiscvS128Load16x4S) \
+ V(RiscvS128Load16x4U) \
+ V(RiscvS128Load32x2S) \
+ V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadLane) \
+ V(RiscvS128StoreLane) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
+ V(RiscvI32x4SConvertI16x8Low) \
+ V(RiscvI32x4SConvertI16x8High) \
+ V(RiscvI32x4UConvertI16x8Low) \
+ V(RiscvI32x4UConvertI16x8High) \
+ V(RiscvI16x8SConvertI8x16Low) \
+ V(RiscvI16x8SConvertI8x16High) \
+ V(RiscvI16x8SConvertI32x4) \
+ V(RiscvI16x8UConvertI32x4) \
+ V(RiscvI16x8UConvertI8x16Low) \
+ V(RiscvI16x8UConvertI8x16High) \
+ V(RiscvI8x16SConvertI16x8) \
+ V(RiscvI8x16UConvertI16x8) \
+ V(RiscvWord64AtomicLoadUint64) \
+ V(RiscvWord64AtomicStoreWord64) \
+ V(RiscvWord64AtomicAddUint64) \
+ V(RiscvWord64AtomicSubUint64) \
+ V(RiscvWord64AtomicAndUint64) \
+ V(RiscvWord64AtomicOrUint64) \
+ V(RiscvWord64AtomicXorUint64) \
+ V(RiscvWord64AtomicExchangeUint64) \
+ V(RiscvWord64AtomicCompareExchangeUint64) \
+ V(RiscvStoreCompressTagged) \
+ V(RiscvLoadDecompressTaggedSigned) \
+ V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
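Aside: the opcode list above is an X-macro. Every V(...) entry this patch adds or keeps (for example the new RiscvRvvLd / RiscvRvvSt vector opcodes and the renamed RiscvI8x16Shuffle) is expanded by the TARGET_ARCH_OPCODE_LIST macro into the backend's opcode enum and its name table. The snippet below is a minimal, self-contained sketch of that pattern using made-up Demo* identifiers; it is not the real V8 header, only an illustration of how a single list keeps the enum and the debug strings in sync.

#include <cstdio>

// Hypothetical stand-in for TARGET_ARCH_OPCODE_LIST; entries mirror names
// that appear in this patch, but the surrounding machinery is illustrative.
#define DEMO_ARCH_OPCODE_LIST(V) \
  V(RiscvRvvLd)                  \
  V(RiscvRvvSt)                  \
  V(RiscvI8x16Shuffle)

enum DemoArchOpcode {
#define DECLARE_OPCODE(Name) kDemo##Name,
  DEMO_ARCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kDemoOpcodeCount
};

static const char* const kDemoOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    DEMO_ARCH_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};

int main() {
  // Prints "RiscvRvvLd": the enum value indexes the name table generated
  // from the very same list, so the two can never drift apart.
  std::printf("%s\n", kDemoOpcodeNames[kDemoRiscvRvvLd]);
}

With this pattern, adding or renaming an opcode only requires touching the list; every expansion site (enum, names, scheduler switches) picks the change up automatically, which is why this patch is mostly list edits.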
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 157b11c9308..54d9a98663f 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -318,7 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x2Reverse:
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
- case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Shuffle:
case kRiscvI8x16Swizzle:
case kRiscvSar32:
case kRiscvSignExtendByte:
@@ -352,7 +352,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvLw:
case kRiscvLoadFloat:
case kRiscvLwu:
- case kRiscvMsaLd:
+ case kRiscvRvvLd:
case kRiscvPeek:
case kRiscvUld:
case kRiscvULoadDouble:
@@ -372,9 +372,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadLane:
- case kRiscvWord64AtomicLoadUint8:
- case kRiscvWord64AtomicLoadUint16:
- case kRiscvWord64AtomicLoadUint32:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
@@ -383,7 +380,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvModD:
case kRiscvModS:
- case kRiscvMsaSt:
+ case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
case kRiscvSd:
@@ -399,37 +396,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
- case kRiscvWord64AtomicStoreWord8:
- case kRiscvWord64AtomicStoreWord16:
- case kRiscvWord64AtomicStoreWord32:
case kRiscvWord64AtomicStoreWord64:
- case kRiscvWord64AtomicAddUint8:
- case kRiscvWord64AtomicAddUint16:
- case kRiscvWord64AtomicAddUint32:
case kRiscvWord64AtomicAddUint64:
- case kRiscvWord64AtomicSubUint8:
- case kRiscvWord64AtomicSubUint16:
- case kRiscvWord64AtomicSubUint32:
case kRiscvWord64AtomicSubUint64:
- case kRiscvWord64AtomicAndUint8:
- case kRiscvWord64AtomicAndUint16:
- case kRiscvWord64AtomicAndUint32:
case kRiscvWord64AtomicAndUint64:
- case kRiscvWord64AtomicOrUint8:
- case kRiscvWord64AtomicOrUint16:
- case kRiscvWord64AtomicOrUint32:
case kRiscvWord64AtomicOrUint64:
- case kRiscvWord64AtomicXorUint8:
- case kRiscvWord64AtomicXorUint16:
- case kRiscvWord64AtomicXorUint32:
case kRiscvWord64AtomicXorUint64:
- case kRiscvWord64AtomicExchangeUint8:
- case kRiscvWord64AtomicExchangeUint16:
- case kRiscvWord64AtomicExchangeUint32:
case kRiscvWord64AtomicExchangeUint64:
- case kRiscvWord64AtomicCompareExchangeUint8:
- case kRiscvWord64AtomicCompareExchangeUint16:
- case kRiscvWord64AtomicCompareExchangeUint32:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
case kRiscvS128StoreLane:
@@ -1144,7 +1117,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
@@ -1169,8 +1142,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Add64Latency(false) + AndLatency(false) + AssertLatency() +
Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
1 + Sub64Latency() + Add64Latency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1541,35 +1512,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kRiscvByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kRiscvAssertEqual:
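The latency cases above price an LL/SC atomic exchange as a sum of primitive latencies (address setup, the load-reserved, the store-conditional, and the retry branch). The following stand-alone model mirrors the shape of the kAtomicExchangeWord32 case for illustration only; the per-primitive numbers are placeholders, not the values V8 actually uses on RISC-V.

#include <cstdio>

// Assumed toy latencies; the real ones depend on the target pipeline.
int LlLatency(int offset) { return offset == 0 ? 1 : 3; }
int ScLatency(int offset) { return offset == 0 ? 1 : 3; }
int BranchShortLatency() { return 2; }

int AtomicExchangeWord32LatencyModel() {
  // address setup + lr.w + value move + sc.w + retry branch + result move
  return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
}

int main() {
  std::printf("%d cycles (model only)\n", AtomicExchangeWord32LatencyModel());
}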
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 72706201e2a..6fc64256ec7 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -363,9 +363,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
RiscvOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -454,7 +454,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
+ opcode = kRiscvLw;
break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
@@ -475,7 +475,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kRiscvLd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
@@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -560,7 +554,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kRiscvSd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
@@ -569,7 +563,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1294,7 +1287,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
@@ -1630,7 +1622,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvUlwu : kRiscvUlw;
+ opcode = kRiscvUlw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -1639,7 +1631,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = kRiscvUld;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1693,7 +1685,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
opcode = kRiscvUsd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1789,7 +1781,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node, true);
NumberBinopMatcher n(node, true);
if (m.right().Is(0) || n.right().IsZero()) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1802,7 +1795,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kUnsignedGreaterThanOrEqual: {
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
@@ -1811,7 +1805,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
default:
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1827,10 +1822,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -1930,16 +1928,18 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
RiscvOperandGenerator g(selector);
- selector->EmitWithContinuation(kRiscvCmpZero, g.UseRegister(value), cont);
+ selector->EmitWithContinuation(kRiscvCmpZero,
+ g.UseRegisterOrImmediateZero(value), cont);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -1947,20 +1947,22 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
} else {
@@ -1968,14 +1970,15 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1993,12 +1996,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2018,12 +2022,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2042,7 +2047,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2404,163 +2410,201 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicLoadUint8;
+ opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicLoadUint16;
+ opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicLoadUint32;
+ opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicLoadUint64;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kRiscv64LdDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kRiscv64LdDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kRiscv64LdDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ opcode = kRiscvWord64AtomicLoadUint64;
+ } else {
+ opcode = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicLoadWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicStoreWord64;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicStoreWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2581,15 +2625,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2614,14 +2657,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
- kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kRiscvWord64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2640,6 +2683,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2844,6 +2888,7 @@ SIMD_VISIT_SPLAT(F64x2)
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
@@ -2890,73 +2935,75 @@ struct ShuffleEntry {
ArchOpcode opcode;
};
-static const ShuffleEntry arch_shuffles[] = {
- {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
- kRiscvS32x4InterleaveRight},
- {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveLeft},
- {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
- kRiscvS32x4PackEven},
- {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
- kRiscvS32x4PackOdd},
- {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
- kRiscvS32x4InterleaveEven},
- {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveOdd},
-
- {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
- kRiscvS16x8InterleaveRight},
- {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
- kRiscvS16x8InterleaveLeft},
- {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
- kRiscvS16x8PackEven},
- {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
- kRiscvS16x8PackOdd},
- {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
- kRiscvS16x8InterleaveEven},
- {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
- kRiscvS16x8InterleaveOdd},
- {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
- kRiscvS16x4Reverse},
- {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
- kRiscvS16x2Reverse},
-
- {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
- kRiscvS8x16InterleaveRight},
- {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
- kRiscvS8x16InterleaveLeft},
- {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
- kRiscvS8x16PackEven},
- {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
- kRiscvS8x16PackOdd},
- {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
- kRiscvS8x16InterleaveEven},
- {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
- kRiscvS8x16InterleaveOdd},
- {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
- {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
- kRiscvS8x2Reverse}};
-
-bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, bool is_swizzle,
- ArchOpcode* opcode) {
- uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
- for (size_t i = 0; i < num_entries; ++i) {
- const ShuffleEntry& entry = table[i];
- int j = 0;
- for (; j < kSimd128Size; ++j) {
- if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
- break;
- }
- }
- if (j == kSimd128Size) {
- *opcode = entry.opcode;
- return true;
- }
- }
- return false;
-}
+// static const ShuffleEntry arch_shuffles[] = {
+// {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+// kRiscvS32x4InterleaveRight},
+// {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveLeft},
+// {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+// kRiscvS32x4PackEven},
+// {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+// kRiscvS32x4PackOdd},
+// {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+// kRiscvS32x4InterleaveEven},
+// {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveOdd},
+
+// {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+// kRiscvS16x8InterleaveRight},
+// {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveLeft},
+// {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+// kRiscvS16x8PackEven},
+// {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+// kRiscvS16x8PackOdd},
+// {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+// kRiscvS16x8InterleaveEven},
+// {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveOdd},
+// {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+// kRiscvS16x4Reverse},
+// {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+// kRiscvS16x2Reverse},
+
+// {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+// kRiscvS8x16InterleaveRight},
+// {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+// kRiscvS8x16InterleaveLeft},
+// {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+// kRiscvS8x16PackEven},
+// {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+// kRiscvS8x16PackOdd},
+// {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+// kRiscvS8x16InterleaveEven},
+// {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+// kRiscvS8x16InterleaveOdd},
+// {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+// kRiscvS8x8Reverse},
+// {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+// kRiscvS8x4Reverse},
+// {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+// kRiscvS8x2Reverse}};
+
+// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+// size_t num_entries, bool is_swizzle,
+// ArchOpcode* opcode) {
+// uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+// for (size_t i = 0; i < num_entries; ++i) {
+// const ShuffleEntry& entry = table[i];
+// int j = 0;
+// for (; j < kSimd128Size; ++j) {
+// if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+// break;
+// }
+// }
+// if (j == kSimd128Size) {
+// *opcode = entry.opcode;
+// return true;
+// }
+// }
+// return false;
+// }
} // namespace
@@ -2964,29 +3011,29 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
- uint8_t shuffle32x4[4];
- ArchOpcode opcode;
- if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- is_swizzle, &opcode)) {
- VisitRRR(this, opcode, node);
- return;
- }
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t offset;
RiscvOperandGenerator g(this);
- if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
- Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
- g.UseRegister(input0), g.UseImmediate(offset));
- return;
- }
- if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
- return;
- }
- Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // uint8_t shuffle32x4[4];
+ // ArchOpcode opcode;
+ // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ // is_swizzle, &opcode)) {
+ // VisitRRR(this, opcode, node);
+ // return;
+ // }
+ // uint8_t offset;
+ // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ // Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ // g.UseRegister(input0), g.UseImmediate(offset));
+ // return;
+ // }
+ // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ // Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // g.UseRegister(input1),
+ // g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ // return;
+ // }
+ Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
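A recurring change in this selector hunk is that the word32- and word64-specific atomic opcodes collapse into shared kAtomic* opcodes, with the operand width carried separately: each Visit* helper now takes an AtomicWidth and ORs AtomicWidthField::encode(width) into the InstructionCode next to the addressing mode. The sketch below shows that bit-packing idea; the field offsets are assumptions for the sketch, not V8's actual BitField layout.

#include <cstdint>
#include <cstdio>

using InstructionCode = uint32_t;

enum ArchOpcode : uint32_t { kAtomicLoadWord32 = 17 };       // assumed value
enum AddressingMode : uint32_t { kMode_None = 0, kMode_MRI = 1 };
enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };

// Illustrative encoders; the real AddressingModeField / AtomicWidthField are
// BitField specializations with different positions and widths.
struct AddressingModeField {
  static InstructionCode encode(AddressingMode m) {
    return static_cast<InstructionCode>(m) << 9;   // assumed offset
  }
};
struct AtomicWidthField {
  static InstructionCode encode(AtomicWidth w) {
    return static_cast<InstructionCode>(w) << 12;  // assumed offset
  }
};

int main() {
  // One 32-bit word carries the shared opcode, the addressing mode, and the
  // atomic width, which is what lets kAtomicLoadWord32 serve both the
  // Word32 and Word64 visitors above.
  InstructionCode code = kAtomicLoadWord32 |
                         AddressingModeField::encode(kMode_MRI) |
                         AtomicWidthField::encode(AtomicWidth::kWord64);
  std::printf("0x%x\n", static_cast<unsigned>(code));
}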
diff --git a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
index 685293169d7..e58a0ed5761 100644
--- a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = r1;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ mov(kSpeculationPoisonRegister, Operand::Zero());
- __ mov(r0, Operand(-1));
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ AndP(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1202,8 +1174,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -1239,7 +1213,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
// Put the return address in a stack slot.
#if V8_ENABLE_WEBASSEMBLY
@@ -1252,10 +1227,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -1291,13 +1266,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r3);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -1395,10 +1370,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kS390_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -2155,7 +2126,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadS8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
@@ -2173,35 +2143,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadU8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadU16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadS16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadU32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadS32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2238,7 +2200,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2258,7 +2219,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
__ vl(i.OutputSimd128Register(), operand, Condition(0));
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kS390_StoreWord8:
@@ -2327,40 +2287,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), mem);
break;
}
- case kS390_Word64AtomicExchangeUint8:
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU8(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt8) {
+ if (opcode == kAtomicExchangeInt8) {
__ LoadS8(output, output);
} else {
__ LoadU8(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint16:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU16(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt16) {
+ if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint32:
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2373,34 +2330,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bne(&do_cs, Label::kNear);
break;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
break;
- case kS390_Word64AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
break;
- case kS390_Word64AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
break;
- case kS390_Word64AtomicCompareExchangeUint32:
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS8(result, result); \
+ __ LoadS8(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint8: \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
@@ -2408,15 +2361,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
true); \
}); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS16(result, result); \
+ __ LoadS16(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint16: \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
@@ -2430,24 +2382,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
- case kS390_Word64AtomicAddUint32:
- case kWord32AtomicAddWord32:
+ case kAtomicAddWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(laa);
break;
- case kS390_Word64AtomicSubUint32:
- case kWord32AtomicSubWord32:
+ case kAtomicSubWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
break;
- case kS390_Word64AtomicAndUint32:
- case kWord32AtomicAndWord32:
+ case kAtomicAndWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lan);
break;
- case kS390_Word64AtomicOrUint32:
- case kWord32AtomicOrWord32:
+ case kAtomicOrWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lao);
break;
- case kS390_Word64AtomicXorUint32:
- case kWord32AtomicXorWord32:
+ case kAtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
case kS390_Word64AtomicAddUint64:
@@ -2482,77 +2429,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name) \
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register)
+
+#define EMIT_SIMD_BINOP(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.InputSimd128Register(1)); \
+ i.Input##stype(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
@@ -2657,64 +2616,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector shifts
-#define VECTOR_SHIFT(op, mode) \
- { \
- __ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0), \
- Condition(mode)); \
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), \
- Condition(mode)); \
- __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
- }
- case kS390_I64x2Shl: {
- VECTOR_SHIFT(veslv, 3);
- break;
- }
- case kS390_I64x2ShrS: {
- VECTOR_SHIFT(vesrav, 3);
- break;
- }
- case kS390_I64x2ShrU: {
- VECTOR_SHIFT(vesrlv, 3);
- break;
- }
- case kS390_I32x4Shl: {
- VECTOR_SHIFT(veslv, 2);
- break;
- }
- case kS390_I32x4ShrS: {
- VECTOR_SHIFT(vesrav, 2);
- break;
- }
- case kS390_I32x4ShrU: {
- VECTOR_SHIFT(vesrlv, 2);
- break;
- }
- case kS390_I16x8Shl: {
- VECTOR_SHIFT(veslv, 1);
- break;
- }
- case kS390_I16x8ShrS: {
- VECTOR_SHIFT(vesrav, 1);
- break;
- }
- case kS390_I16x8ShrU: {
- VECTOR_SHIFT(vesrlv, 1);
- break;
- }
- case kS390_I8x16Shl: {
- VECTOR_SHIFT(veslv, 0);
- break;
- }
- case kS390_I8x16ShrS: {
- VECTOR_SHIFT(vesrav, 0);
- break;
- }
- case kS390_I8x16ShrU: {
- VECTOR_SHIFT(vesrlv, 0);
- break;
- }
// vector unary ops
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3489,6 +3390,120 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
break;
}
+#define LOAD_SPLAT(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndSplat##type##LE(dst, operand);
+ case kS390_S128Load64Splat: {
+ LOAD_SPLAT(64x2);
+ break;
+ }
+ case kS390_S128Load32Splat: {
+ LOAD_SPLAT(32x4);
+ break;
+ }
+ case kS390_S128Load16Splat: {
+ LOAD_SPLAT(16x8);
+ break;
+ }
+ case kS390_S128Load8Splat: {
+ LOAD_SPLAT(8x16);
+ break;
+ }
+#undef LOAD_SPLAT
+#define LOAD_EXTEND(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndExtend##type##LE(dst, operand);
+ case kS390_S128Load32x2U: {
+ LOAD_EXTEND(32x2U);
+ break;
+ }
+ case kS390_S128Load32x2S: {
+ LOAD_EXTEND(32x2S);
+ break;
+ }
+ case kS390_S128Load16x4U: {
+ LOAD_EXTEND(16x4U);
+ break;
+ }
+ case kS390_S128Load16x4S: {
+ LOAD_EXTEND(16x4S);
+ break;
+ }
+ case kS390_S128Load8x8U: {
+ LOAD_EXTEND(8x8U);
+ break;
+ }
+ case kS390_S128Load8x8S: {
+ LOAD_EXTEND(8x8S);
+ break;
+ }
+#undef LOAD_EXTEND
+#define LOAD_AND_ZERO(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadV##type##ZeroLE(dst, operand);
+ case kS390_S128Load32Zero: {
+ LOAD_AND_ZERO(32);
+ break;
+ }
+ case kS390_S128Load64Zero: {
+ LOAD_AND_ZERO(64);
+ break;
+ }
+#undef LOAD_AND_ZERO
+#undef LOAD_EXTEND
+#define LOAD_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ LoadLane##type##LE(dst, operand, lane);
+ case kS390_S128Load8Lane: {
+ LOAD_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load16Lane: {
+ LOAD_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load32Lane: {
+ LOAD_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load64Lane: {
+ LOAD_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef LOAD_LANE
+#define STORE_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register src = i.InputSimd128Register(0); \
+ __ StoreLane##type##LE(src, operand, lane);
+ case kS390_S128Store8Lane: {
+ STORE_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store16Lane: {
+ STORE_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store32Lane: {
+ STORE_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store64Lane: {
+ STORE_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef STORE_LANE
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -3541,20 +3556,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ mov(r0, Operand::Zero());
- __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
- kSpeculationPoisonRegister, r0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3781,7 +3782,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4028,7 +4028,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
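
The LOAD_LANE/STORE_LANE cases above pass lane operands such as 15 - i.InputUint8(1) because wasm numbers SIMD lanes little-endian while the z/Architecture vector facility numbers elements from the most significant end, so the *LE macro-assembler helpers take a mirrored element index. A minimal standalone sketch of that mapping (MirrorLaneIndex is a made-up name, not a V8 helper):

#include <cstdio>

// Mirrors a little-endian wasm lane index into a big-endian vector element
// index within a 128-bit register, given the lane width in bytes.
int MirrorLaneIndex(int lane_bytes, int wasm_lane) {
  int lanes_per_vector = 16 / lane_bytes;
  return (lanes_per_vector - 1) - wasm_lane;
}

int main() {
  std::printf("%d\n", MirrorLaneIndex(1, 0));  // 8-bit lanes:  lane 0 -> element 15
  std::printf("%d\n", MirrorLaneIndex(2, 1));  // 16-bit lanes: lane 1 -> element 6
  std::printf("%d\n", MirrorLaneIndex(8, 1));  // 64-bit lanes: lane 1 -> element 0
  return 0;
}
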
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 4eea2fa8658..7dcd7212c96 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -11,396 +11,397 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_Peek) \
- V(S390_Abs32) \
- V(S390_Abs64) \
- V(S390_And32) \
- V(S390_And64) \
- V(S390_Or32) \
- V(S390_Or64) \
- V(S390_Xor32) \
- V(S390_Xor64) \
- V(S390_ShiftLeft32) \
- V(S390_ShiftLeft64) \
- V(S390_ShiftRight32) \
- V(S390_ShiftRight64) \
- V(S390_ShiftRightArith32) \
- V(S390_ShiftRightArith64) \
- V(S390_RotRight32) \
- V(S390_RotRight64) \
- V(S390_Not32) \
- V(S390_Not64) \
- V(S390_RotLeftAndClear64) \
- V(S390_RotLeftAndClearLeft64) \
- V(S390_RotLeftAndClearRight64) \
- V(S390_Lay) \
- V(S390_Add32) \
- V(S390_Add64) \
- V(S390_AddFloat) \
- V(S390_AddDouble) \
- V(S390_Sub32) \
- V(S390_Sub64) \
- V(S390_SubFloat) \
- V(S390_SubDouble) \
- V(S390_Mul32) \
- V(S390_Mul32WithOverflow) \
- V(S390_Mul64) \
- V(S390_MulHigh32) \
- V(S390_MulHighU32) \
- V(S390_MulFloat) \
- V(S390_MulDouble) \
- V(S390_Div32) \
- V(S390_Div64) \
- V(S390_DivU32) \
- V(S390_DivU64) \
- V(S390_DivFloat) \
- V(S390_DivDouble) \
- V(S390_Mod32) \
- V(S390_Mod64) \
- V(S390_ModU32) \
- V(S390_ModU64) \
- V(S390_ModDouble) \
- V(S390_Neg32) \
- V(S390_Neg64) \
- V(S390_NegDouble) \
- V(S390_NegFloat) \
- V(S390_SqrtFloat) \
- V(S390_FloorFloat) \
- V(S390_CeilFloat) \
- V(S390_TruncateFloat) \
- V(S390_FloatNearestInt) \
- V(S390_AbsFloat) \
- V(S390_SqrtDouble) \
- V(S390_FloorDouble) \
- V(S390_CeilDouble) \
- V(S390_TruncateDouble) \
- V(S390_RoundDouble) \
- V(S390_DoubleNearestInt) \
- V(S390_MaxFloat) \
- V(S390_MaxDouble) \
- V(S390_MinFloat) \
- V(S390_MinDouble) \
- V(S390_AbsDouble) \
- V(S390_Cntlz32) \
- V(S390_Cntlz64) \
- V(S390_Popcnt32) \
- V(S390_Popcnt64) \
- V(S390_Cmp32) \
- V(S390_Cmp64) \
- V(S390_CmpFloat) \
- V(S390_CmpDouble) \
- V(S390_Tst32) \
- V(S390_Tst64) \
- V(S390_Push) \
- V(S390_PushFrame) \
- V(S390_StoreToStackSlot) \
- V(S390_SignExtendWord8ToInt32) \
- V(S390_SignExtendWord16ToInt32) \
- V(S390_SignExtendWord8ToInt64) \
- V(S390_SignExtendWord16ToInt64) \
- V(S390_SignExtendWord32ToInt64) \
- V(S390_Uint32ToUint64) \
- V(S390_Int64ToInt32) \
- V(S390_Int64ToFloat32) \
- V(S390_Int64ToDouble) \
- V(S390_Uint64ToFloat32) \
- V(S390_Uint64ToDouble) \
- V(S390_Int32ToFloat32) \
- V(S390_Int32ToDouble) \
- V(S390_Uint32ToFloat32) \
- V(S390_Uint32ToDouble) \
- V(S390_Float32ToInt64) \
- V(S390_Float32ToUint64) \
- V(S390_Float32ToInt32) \
- V(S390_Float32ToUint32) \
- V(S390_Float32ToDouble) \
- V(S390_Float64SilenceNaN) \
- V(S390_DoubleToInt32) \
- V(S390_DoubleToUint32) \
- V(S390_DoubleToInt64) \
- V(S390_DoubleToUint64) \
- V(S390_DoubleToFloat32) \
- V(S390_DoubleExtractLowWord32) \
- V(S390_DoubleExtractHighWord32) \
- V(S390_DoubleInsertLowWord32) \
- V(S390_DoubleInsertHighWord32) \
- V(S390_DoubleConstruct) \
- V(S390_BitcastInt32ToFloat32) \
- V(S390_BitcastFloat32ToInt32) \
- V(S390_BitcastInt64ToDouble) \
- V(S390_BitcastDoubleToInt64) \
- V(S390_LoadWordS8) \
- V(S390_LoadWordU8) \
- V(S390_LoadWordS16) \
- V(S390_LoadWordU16) \
- V(S390_LoadWordS32) \
- V(S390_LoadWordU32) \
- V(S390_LoadAndTestWord32) \
- V(S390_LoadAndTestWord64) \
- V(S390_LoadAndTestFloat32) \
- V(S390_LoadAndTestFloat64) \
- V(S390_LoadReverse16RR) \
- V(S390_LoadReverse32RR) \
- V(S390_LoadReverse64RR) \
- V(S390_LoadReverseSimd128RR) \
- V(S390_LoadReverseSimd128) \
- V(S390_LoadReverse16) \
- V(S390_LoadReverse32) \
- V(S390_LoadReverse64) \
- V(S390_LoadWord64) \
- V(S390_LoadFloat32) \
- V(S390_LoadDouble) \
- V(S390_StoreWord8) \
- V(S390_StoreWord16) \
- V(S390_StoreWord32) \
- V(S390_StoreWord64) \
- V(S390_StoreReverse16) \
- V(S390_StoreReverse32) \
- V(S390_StoreReverse64) \
- V(S390_StoreReverseSimd128) \
- V(S390_StoreFloat32) \
- V(S390_StoreDouble) \
- V(S390_CompressSigned) \
- V(S390_CompressPointer) \
- V(S390_CompressAny) \
- V(S390_Word64AtomicExchangeUint8) \
- V(S390_Word64AtomicExchangeUint16) \
- V(S390_Word64AtomicExchangeUint32) \
- V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint8) \
- V(S390_Word64AtomicCompareExchangeUint16) \
- V(S390_Word64AtomicCompareExchangeUint32) \
- V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint8) \
- V(S390_Word64AtomicAddUint16) \
- V(S390_Word64AtomicAddUint32) \
- V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint8) \
- V(S390_Word64AtomicSubUint16) \
- V(S390_Word64AtomicSubUint32) \
- V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint8) \
- V(S390_Word64AtomicAndUint16) \
- V(S390_Word64AtomicAndUint32) \
- V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint8) \
- V(S390_Word64AtomicOrUint16) \
- V(S390_Word64AtomicOrUint32) \
- V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint8) \
- V(S390_Word64AtomicXorUint16) \
- V(S390_Word64AtomicXorUint32) \
- V(S390_Word64AtomicXorUint64) \
- V(S390_F64x2Splat) \
- V(S390_F64x2ReplaceLane) \
- V(S390_F64x2Abs) \
- V(S390_F64x2Neg) \
- V(S390_F64x2Sqrt) \
- V(S390_F64x2Add) \
- V(S390_F64x2Sub) \
- V(S390_F64x2Mul) \
- V(S390_F64x2Div) \
- V(S390_F64x2Eq) \
- V(S390_F64x2Ne) \
- V(S390_F64x2Lt) \
- V(S390_F64x2Le) \
- V(S390_F64x2Min) \
- V(S390_F64x2Max) \
- V(S390_F64x2ExtractLane) \
- V(S390_F64x2Qfma) \
- V(S390_F64x2Qfms) \
- V(S390_F64x2Pmin) \
- V(S390_F64x2Pmax) \
- V(S390_F64x2Ceil) \
- V(S390_F64x2Floor) \
- V(S390_F64x2Trunc) \
- V(S390_F64x2NearestInt) \
- V(S390_F64x2ConvertLowI32x4S) \
- V(S390_F64x2ConvertLowI32x4U) \
- V(S390_F64x2PromoteLowF32x4) \
- V(S390_F32x4Splat) \
- V(S390_F32x4ExtractLane) \
- V(S390_F32x4ReplaceLane) \
- V(S390_F32x4Add) \
- V(S390_F32x4Sub) \
- V(S390_F32x4Mul) \
- V(S390_F32x4Eq) \
- V(S390_F32x4Ne) \
- V(S390_F32x4Lt) \
- V(S390_F32x4Le) \
- V(S390_F32x4Abs) \
- V(S390_F32x4Neg) \
- V(S390_F32x4RecipApprox) \
- V(S390_F32x4RecipSqrtApprox) \
- V(S390_F32x4SConvertI32x4) \
- V(S390_F32x4UConvertI32x4) \
- V(S390_F32x4Sqrt) \
- V(S390_F32x4Div) \
- V(S390_F32x4Min) \
- V(S390_F32x4Max) \
- V(S390_F32x4Qfma) \
- V(S390_F32x4Qfms) \
- V(S390_F32x4Pmin) \
- V(S390_F32x4Pmax) \
- V(S390_F32x4Ceil) \
- V(S390_F32x4Floor) \
- V(S390_F32x4Trunc) \
- V(S390_F32x4NearestInt) \
- V(S390_F32x4DemoteF64x2Zero) \
- V(S390_I64x2Neg) \
- V(S390_I64x2Add) \
- V(S390_I64x2Sub) \
- V(S390_I64x2Shl) \
- V(S390_I64x2ShrS) \
- V(S390_I64x2ShrU) \
- V(S390_I64x2Mul) \
- V(S390_I64x2Splat) \
- V(S390_I64x2ReplaceLane) \
- V(S390_I64x2ExtractLane) \
- V(S390_I64x2Eq) \
- V(S390_I64x2BitMask) \
- V(S390_I64x2ExtMulLowI32x4S) \
- V(S390_I64x2ExtMulHighI32x4S) \
- V(S390_I64x2ExtMulLowI32x4U) \
- V(S390_I64x2ExtMulHighI32x4U) \
- V(S390_I64x2SConvertI32x4Low) \
- V(S390_I64x2SConvertI32x4High) \
- V(S390_I64x2UConvertI32x4Low) \
- V(S390_I64x2UConvertI32x4High) \
- V(S390_I64x2Ne) \
- V(S390_I64x2GtS) \
- V(S390_I64x2GeS) \
- V(S390_I64x2Abs) \
- V(S390_I32x4Splat) \
- V(S390_I32x4ExtractLane) \
- V(S390_I32x4ReplaceLane) \
- V(S390_I32x4Add) \
- V(S390_I32x4Sub) \
- V(S390_I32x4Mul) \
- V(S390_I32x4MinS) \
- V(S390_I32x4MinU) \
- V(S390_I32x4MaxS) \
- V(S390_I32x4MaxU) \
- V(S390_I32x4Eq) \
- V(S390_I32x4Ne) \
- V(S390_I32x4GtS) \
- V(S390_I32x4GeS) \
- V(S390_I32x4GtU) \
- V(S390_I32x4GeU) \
- V(S390_I32x4Neg) \
- V(S390_I32x4Shl) \
- V(S390_I32x4ShrS) \
- V(S390_I32x4ShrU) \
- V(S390_I32x4SConvertF32x4) \
- V(S390_I32x4UConvertF32x4) \
- V(S390_I32x4SConvertI16x8Low) \
- V(S390_I32x4SConvertI16x8High) \
- V(S390_I32x4UConvertI16x8Low) \
- V(S390_I32x4UConvertI16x8High) \
- V(S390_I32x4Abs) \
- V(S390_I32x4BitMask) \
- V(S390_I32x4DotI16x8S) \
- V(S390_I32x4ExtMulLowI16x8S) \
- V(S390_I32x4ExtMulHighI16x8S) \
- V(S390_I32x4ExtMulLowI16x8U) \
- V(S390_I32x4ExtMulHighI16x8U) \
- V(S390_I32x4ExtAddPairwiseI16x8S) \
- V(S390_I32x4ExtAddPairwiseI16x8U) \
- V(S390_I32x4TruncSatF64x2SZero) \
- V(S390_I32x4TruncSatF64x2UZero) \
- V(S390_I16x8Splat) \
- V(S390_I16x8ExtractLaneU) \
- V(S390_I16x8ExtractLaneS) \
- V(S390_I16x8ReplaceLane) \
- V(S390_I16x8Add) \
- V(S390_I16x8Sub) \
- V(S390_I16x8Mul) \
- V(S390_I16x8MinS) \
- V(S390_I16x8MinU) \
- V(S390_I16x8MaxS) \
- V(S390_I16x8MaxU) \
- V(S390_I16x8Eq) \
- V(S390_I16x8Ne) \
- V(S390_I16x8GtS) \
- V(S390_I16x8GeS) \
- V(S390_I16x8GtU) \
- V(S390_I16x8GeU) \
- V(S390_I16x8Shl) \
- V(S390_I16x8ShrS) \
- V(S390_I16x8ShrU) \
- V(S390_I16x8Neg) \
- V(S390_I16x8SConvertI32x4) \
- V(S390_I16x8UConvertI32x4) \
- V(S390_I16x8SConvertI8x16Low) \
- V(S390_I16x8SConvertI8x16High) \
- V(S390_I16x8UConvertI8x16Low) \
- V(S390_I16x8UConvertI8x16High) \
- V(S390_I16x8AddSatS) \
- V(S390_I16x8SubSatS) \
- V(S390_I16x8AddSatU) \
- V(S390_I16x8SubSatU) \
- V(S390_I16x8RoundingAverageU) \
- V(S390_I16x8Abs) \
- V(S390_I16x8BitMask) \
- V(S390_I16x8ExtMulLowI8x16S) \
- V(S390_I16x8ExtMulHighI8x16S) \
- V(S390_I16x8ExtMulLowI8x16U) \
- V(S390_I16x8ExtMulHighI8x16U) \
- V(S390_I16x8ExtAddPairwiseI8x16S) \
- V(S390_I16x8ExtAddPairwiseI8x16U) \
- V(S390_I16x8Q15MulRSatS) \
- V(S390_I8x16Splat) \
- V(S390_I8x16ExtractLaneU) \
- V(S390_I8x16ExtractLaneS) \
- V(S390_I8x16ReplaceLane) \
- V(S390_I8x16Add) \
- V(S390_I8x16Sub) \
- V(S390_I8x16MinS) \
- V(S390_I8x16MinU) \
- V(S390_I8x16MaxS) \
- V(S390_I8x16MaxU) \
- V(S390_I8x16Eq) \
- V(S390_I8x16Ne) \
- V(S390_I8x16GtS) \
- V(S390_I8x16GeS) \
- V(S390_I8x16GtU) \
- V(S390_I8x16GeU) \
- V(S390_I8x16Shl) \
- V(S390_I8x16ShrS) \
- V(S390_I8x16ShrU) \
- V(S390_I8x16Neg) \
- V(S390_I8x16SConvertI16x8) \
- V(S390_I8x16UConvertI16x8) \
- V(S390_I8x16AddSatS) \
- V(S390_I8x16SubSatS) \
- V(S390_I8x16AddSatU) \
- V(S390_I8x16SubSatU) \
- V(S390_I8x16RoundingAverageU) \
- V(S390_I8x16Abs) \
- V(S390_I8x16BitMask) \
- V(S390_I8x16Shuffle) \
- V(S390_I8x16Swizzle) \
- V(S390_I8x16Popcnt) \
- V(S390_I64x2AllTrue) \
- V(S390_I32x4AllTrue) \
- V(S390_I16x8AllTrue) \
- V(S390_I8x16AllTrue) \
- V(S390_V128AnyTrue) \
- V(S390_S128And) \
- V(S390_S128Or) \
- V(S390_S128Xor) \
- V(S390_S128Const) \
- V(S390_S128Zero) \
- V(S390_S128AllOnes) \
- V(S390_S128Not) \
- V(S390_S128Select) \
- V(S390_S128AndNot) \
- V(S390_StoreSimd128) \
- V(S390_LoadSimd128) \
- V(S390_StoreCompressTagged) \
- V(S390_LoadDecompressTaggedSigned) \
- V(S390_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(S390_Peek) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not32) \
+ V(S390_Not64) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
+ V(S390_Add32) \
+ V(S390_Add64) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_Mul32) \
+ V(S390_Mul32WithOverflow) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
+ V(S390_NegDouble) \
+ V(S390_NegFloat) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_FloatNearestInt) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_DoubleNearestInt) \
+ V(S390_MaxFloat) \
+ V(S390_MaxDouble) \
+ V(S390_MinFloat) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StoreToStackSlot) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverseSimd128RR) \
+ V(S390_LoadReverseSimd128) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
+ V(S390_StoreReverseSimd128) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble) \
+ V(S390_Word64AtomicExchangeUint64) \
+ V(S390_Word64AtomicCompareExchangeUint64) \
+ V(S390_Word64AtomicAddUint64) \
+ V(S390_Word64AtomicSubUint64) \
+ V(S390_Word64AtomicAndUint64) \
+ V(S390_Word64AtomicOrUint64) \
+ V(S390_Word64AtomicXorUint64) \
+ V(S390_F64x2Splat) \
+ V(S390_F64x2ReplaceLane) \
+ V(S390_F64x2Abs) \
+ V(S390_F64x2Neg) \
+ V(S390_F64x2Sqrt) \
+ V(S390_F64x2Add) \
+ V(S390_F64x2Sub) \
+ V(S390_F64x2Mul) \
+ V(S390_F64x2Div) \
+ V(S390_F64x2Eq) \
+ V(S390_F64x2Ne) \
+ V(S390_F64x2Lt) \
+ V(S390_F64x2Le) \
+ V(S390_F64x2Min) \
+ V(S390_F64x2Max) \
+ V(S390_F64x2ExtractLane) \
+ V(S390_F64x2Qfma) \
+ V(S390_F64x2Qfms) \
+ V(S390_F64x2Pmin) \
+ V(S390_F64x2Pmax) \
+ V(S390_F64x2Ceil) \
+ V(S390_F64x2Floor) \
+ V(S390_F64x2Trunc) \
+ V(S390_F64x2NearestInt) \
+ V(S390_F64x2ConvertLowI32x4S) \
+ V(S390_F64x2ConvertLowI32x4U) \
+ V(S390_F64x2PromoteLowF32x4) \
+ V(S390_F32x4Splat) \
+ V(S390_F32x4ExtractLane) \
+ V(S390_F32x4ReplaceLane) \
+ V(S390_F32x4Add) \
+ V(S390_F32x4Sub) \
+ V(S390_F32x4Mul) \
+ V(S390_F32x4Eq) \
+ V(S390_F32x4Ne) \
+ V(S390_F32x4Lt) \
+ V(S390_F32x4Le) \
+ V(S390_F32x4Abs) \
+ V(S390_F32x4Neg) \
+ V(S390_F32x4RecipApprox) \
+ V(S390_F32x4RecipSqrtApprox) \
+ V(S390_F32x4SConvertI32x4) \
+ V(S390_F32x4UConvertI32x4) \
+ V(S390_F32x4Sqrt) \
+ V(S390_F32x4Div) \
+ V(S390_F32x4Min) \
+ V(S390_F32x4Max) \
+ V(S390_F32x4Qfma) \
+ V(S390_F32x4Qfms) \
+ V(S390_F32x4Pmin) \
+ V(S390_F32x4Pmax) \
+ V(S390_F32x4Ceil) \
+ V(S390_F32x4Floor) \
+ V(S390_F32x4Trunc) \
+ V(S390_F32x4NearestInt) \
+ V(S390_F32x4DemoteF64x2Zero) \
+ V(S390_I64x2Neg) \
+ V(S390_I64x2Add) \
+ V(S390_I64x2Sub) \
+ V(S390_I64x2Shl) \
+ V(S390_I64x2ShrS) \
+ V(S390_I64x2ShrU) \
+ V(S390_I64x2Mul) \
+ V(S390_I64x2Splat) \
+ V(S390_I64x2ReplaceLane) \
+ V(S390_I64x2ExtractLane) \
+ V(S390_I64x2Eq) \
+ V(S390_I64x2BitMask) \
+ V(S390_I64x2ExtMulLowI32x4S) \
+ V(S390_I64x2ExtMulHighI32x4S) \
+ V(S390_I64x2ExtMulLowI32x4U) \
+ V(S390_I64x2ExtMulHighI32x4U) \
+ V(S390_I64x2SConvertI32x4Low) \
+ V(S390_I64x2SConvertI32x4High) \
+ V(S390_I64x2UConvertI32x4Low) \
+ V(S390_I64x2UConvertI32x4High) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2Abs) \
+ V(S390_I32x4Splat) \
+ V(S390_I32x4ExtractLane) \
+ V(S390_I32x4ReplaceLane) \
+ V(S390_I32x4Add) \
+ V(S390_I32x4Sub) \
+ V(S390_I32x4Mul) \
+ V(S390_I32x4MinS) \
+ V(S390_I32x4MinU) \
+ V(S390_I32x4MaxS) \
+ V(S390_I32x4MaxU) \
+ V(S390_I32x4Eq) \
+ V(S390_I32x4Ne) \
+ V(S390_I32x4GtS) \
+ V(S390_I32x4GeS) \
+ V(S390_I32x4GtU) \
+ V(S390_I32x4GeU) \
+ V(S390_I32x4Neg) \
+ V(S390_I32x4Shl) \
+ V(S390_I32x4ShrS) \
+ V(S390_I32x4ShrU) \
+ V(S390_I32x4SConvertF32x4) \
+ V(S390_I32x4UConvertF32x4) \
+ V(S390_I32x4SConvertI16x8Low) \
+ V(S390_I32x4SConvertI16x8High) \
+ V(S390_I32x4UConvertI16x8Low) \
+ V(S390_I32x4UConvertI16x8High) \
+ V(S390_I32x4Abs) \
+ V(S390_I32x4BitMask) \
+ V(S390_I32x4DotI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8S) \
+ V(S390_I32x4ExtMulHighI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8U) \
+ V(S390_I32x4ExtMulHighI16x8U) \
+ V(S390_I32x4ExtAddPairwiseI16x8S) \
+ V(S390_I32x4ExtAddPairwiseI16x8U) \
+ V(S390_I32x4TruncSatF64x2SZero) \
+ V(S390_I32x4TruncSatF64x2UZero) \
+ V(S390_I16x8Splat) \
+ V(S390_I16x8ExtractLaneU) \
+ V(S390_I16x8ExtractLaneS) \
+ V(S390_I16x8ReplaceLane) \
+ V(S390_I16x8Add) \
+ V(S390_I16x8Sub) \
+ V(S390_I16x8Mul) \
+ V(S390_I16x8MinS) \
+ V(S390_I16x8MinU) \
+ V(S390_I16x8MaxS) \
+ V(S390_I16x8MaxU) \
+ V(S390_I16x8Eq) \
+ V(S390_I16x8Ne) \
+ V(S390_I16x8GtS) \
+ V(S390_I16x8GeS) \
+ V(S390_I16x8GtU) \
+ V(S390_I16x8GeU) \
+ V(S390_I16x8Shl) \
+ V(S390_I16x8ShrS) \
+ V(S390_I16x8ShrU) \
+ V(S390_I16x8Neg) \
+ V(S390_I16x8SConvertI32x4) \
+ V(S390_I16x8UConvertI32x4) \
+ V(S390_I16x8SConvertI8x16Low) \
+ V(S390_I16x8SConvertI8x16High) \
+ V(S390_I16x8UConvertI8x16Low) \
+ V(S390_I16x8UConvertI8x16High) \
+ V(S390_I16x8AddSatS) \
+ V(S390_I16x8SubSatS) \
+ V(S390_I16x8AddSatU) \
+ V(S390_I16x8SubSatU) \
+ V(S390_I16x8RoundingAverageU) \
+ V(S390_I16x8Abs) \
+ V(S390_I16x8BitMask) \
+ V(S390_I16x8ExtMulLowI8x16S) \
+ V(S390_I16x8ExtMulHighI8x16S) \
+ V(S390_I16x8ExtMulLowI8x16U) \
+ V(S390_I16x8ExtMulHighI8x16U) \
+ V(S390_I16x8ExtAddPairwiseI8x16S) \
+ V(S390_I16x8ExtAddPairwiseI8x16U) \
+ V(S390_I16x8Q15MulRSatS) \
+ V(S390_I8x16Splat) \
+ V(S390_I8x16ExtractLaneU) \
+ V(S390_I8x16ExtractLaneS) \
+ V(S390_I8x16ReplaceLane) \
+ V(S390_I8x16Add) \
+ V(S390_I8x16Sub) \
+ V(S390_I8x16MinS) \
+ V(S390_I8x16MinU) \
+ V(S390_I8x16MaxS) \
+ V(S390_I8x16MaxU) \
+ V(S390_I8x16Eq) \
+ V(S390_I8x16Ne) \
+ V(S390_I8x16GtS) \
+ V(S390_I8x16GeS) \
+ V(S390_I8x16GtU) \
+ V(S390_I8x16GeU) \
+ V(S390_I8x16Shl) \
+ V(S390_I8x16ShrS) \
+ V(S390_I8x16ShrU) \
+ V(S390_I8x16Neg) \
+ V(S390_I8x16SConvertI16x8) \
+ V(S390_I8x16UConvertI16x8) \
+ V(S390_I8x16AddSatS) \
+ V(S390_I8x16SubSatS) \
+ V(S390_I8x16AddSatU) \
+ V(S390_I8x16SubSatU) \
+ V(S390_I8x16RoundingAverageU) \
+ V(S390_I8x16Abs) \
+ V(S390_I8x16BitMask) \
+ V(S390_I8x16Shuffle) \
+ V(S390_I8x16Swizzle) \
+ V(S390_I8x16Popcnt) \
+ V(S390_I64x2AllTrue) \
+ V(S390_I32x4AllTrue) \
+ V(S390_I16x8AllTrue) \
+ V(S390_I8x16AllTrue) \
+ V(S390_V128AnyTrue) \
+ V(S390_S128And) \
+ V(S390_S128Or) \
+ V(S390_S128Xor) \
+ V(S390_S128Const) \
+ V(S390_S128Zero) \
+ V(S390_S128AllOnes) \
+ V(S390_S128Not) \
+ V(S390_S128Select) \
+ V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
+ V(S390_StoreSimd128) \
+ V(S390_LoadSimd128) \
+ V(S390_StoreCompressTagged) \
+ V(S390_LoadDecompressTaggedSigned) \
+ V(S390_LoadDecompressTaggedPointer) \
V(S390_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
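
TARGET_ARCH_OPCODE_LIST above is an X-macro: the same list is expanded with different definitions of V so that the kS390_* enumerators, their printable names, and other per-opcode tables stay in sync from a single source of truth. A small self-contained illustration of the pattern, with toy names rather than V8's actual expansion sites:

#include <cstdio>

#define TOY_OPCODE_LIST(V) \
  V(Toy_Add32)             \
  V(Toy_Sub32)             \
  V(Toy_Load64)

enum ToyOpcode {
#define DECLARE_ENUM(Name) k##Name,
  TOY_OPCODE_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

const char* ToyOpcodeName(ToyOpcode opcode) {
  switch (opcode) {
#define DECLARE_NAME(Name) \
  case k##Name:            \
    return #Name;
    TOY_OPCODE_LIST(DECLARE_NAME)
#undef DECLARE_NAME
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", ToyOpcodeName(kToy_Sub32));  // prints Toy_Sub32
  return 0;
}
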
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index afc28b1f8cf..d7046507c71 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -135,9 +135,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
case kS390_LoadAndTestFloat64:
- case kS390_CompressSigned:
- case kS390_CompressPointer:
- case kS390_CompressAny:
case kS390_F64x2Splat:
case kS390_F64x2ReplaceLane:
case kS390_F64x2Abs:
@@ -362,6 +359,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadDecompressTaggedSigned:
case kS390_LoadDecompressTaggedPointer:
case kS390_LoadDecompressAnyTagged:
+ case kS390_S128Load8Splat:
+ case kS390_S128Load16Splat:
+ case kS390_S128Load32Splat:
+ case kS390_S128Load64Splat:
+ case kS390_S128Load8x8S:
+ case kS390_S128Load8x8U:
+ case kS390_S128Load16x4S:
+ case kS390_S128Load16x4U:
+ case kS390_S128Load32x2S:
+ case kS390_S128Load32x2U:
+ case kS390_S128Load32Zero:
+ case kS390_S128Load64Zero:
+ case kS390_S128Load8Lane:
+ case kS390_S128Load16Lane:
+ case kS390_S128Load32Lane:
+ case kS390_S128Load64Lane:
return kIsLoadOperation;
case kS390_StoreWord8:
@@ -379,35 +392,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
+ case kS390_S128Store8Lane:
+ case kS390_S128Store16Lane:
+ case kS390_S128Store32Lane:
+ case kS390_S128Store64Lane:
return kHasSideEffect;
- case kS390_Word64AtomicExchangeUint8:
- case kS390_Word64AtomicExchangeUint16:
- case kS390_Word64AtomicExchangeUint32:
case kS390_Word64AtomicExchangeUint64:
- case kS390_Word64AtomicCompareExchangeUint8:
- case kS390_Word64AtomicCompareExchangeUint16:
- case kS390_Word64AtomicCompareExchangeUint32:
case kS390_Word64AtomicCompareExchangeUint64:
- case kS390_Word64AtomicAddUint8:
- case kS390_Word64AtomicAddUint16:
- case kS390_Word64AtomicAddUint32:
case kS390_Word64AtomicAddUint64:
- case kS390_Word64AtomicSubUint8:
- case kS390_Word64AtomicSubUint16:
- case kS390_Word64AtomicSubUint32:
case kS390_Word64AtomicSubUint64:
- case kS390_Word64AtomicAndUint8:
- case kS390_Word64AtomicAndUint16:
- case kS390_Word64AtomicAndUint32:
case kS390_Word64AtomicAndUint64:
- case kS390_Word64AtomicOrUint8:
- case kS390_Word64AtomicOrUint16:
- case kS390_Word64AtomicOrUint32:
case kS390_Word64AtomicOrUint64:
- case kS390_Word64AtomicXorUint8:
- case kS390_Word64AtomicXorUint16:
- case kS390_Word64AtomicXorUint32:
case kS390_Word64AtomicXorUint64:
return kHasSideEffect;
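
The scheduler changes above are pure reclassification: the new SIMD transform and lane loads report kIsLoadOperation, and the lane stores report kHasSideEffect, the flags the instruction scheduler consults when deciding whether two instructions may swap. A rough standalone sketch of how such flags are typically used (a deliberately conservative toy rule, not V8's actual scheduler logic):

#include <cstdio>

enum TargetInstructionFlags {
  kNoOpcodeFlags = 0,
  kIsLoadOperation = 1 << 0,
  kHasSideEffect = 1 << 1,
};

// Toy ordering rule: pure computations and loads may be reordered freely with
// each other, but nothing moves across a side-effecting instruction.
bool MayReorder(int earlier_flags, int later_flags) {
  if ((earlier_flags | later_flags) & kHasSideEffect) return false;
  return true;
}

int main() {
  std::printf("%d\n", MayReorder(kIsLoadOperation, kIsLoadOperation));  // 1
  std::printf("%d\n", MayReorder(kIsLoadOperation, kHasSideEffect));    // 0
  return 0;
}
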
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index bcf5a8dfff8..120eaf41dc5 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -106,7 +106,6 @@ class S390OperandGenerator final : public OperandGenerator {
return OpParameter<int64_t>(node->op());
else
UNIMPLEMENTED();
- return 0L;
}
bool CanBeImmediate(Node* node, OperandModes mode) {
@@ -272,8 +271,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
(S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
: OperandMode::kInt20Imm)
-ArchOpcode SelectLoadOpcode(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -466,7 +464,8 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
} else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
- canCombineWithLoad(SelectLoadOpcode(right))) {
+ canCombineWithLoad(
+ SelectLoadOpcode(LoadRepresentationOf(right->op())))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
right, inputs, input_count, OpcodeImmMode(*opcode));
*opcode |= AddressingModeField::encode(mode);
@@ -690,28 +689,28 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
-void InstructionSelector::VisitLoad(Node* node) {
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
S390OperandGenerator g(this);
- InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
Emit(opcode, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ InstructionCode opcode = SelectLoadOpcode(load_rep);
+ VisitLoad(node, node, opcode);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
@@ -2153,21 +2152,18 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2181,7 +2177,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs);
}
@@ -2189,40 +2186,40 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2248,7 +2245,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
size_t output_count = 0;
outputs[output_count++] = g.DefineSameAsFirst(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs);
}
@@ -2256,40 +2254,40 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2318,7 +2316,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
size_t temp_count = 0;
temps[temp_count++] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
temps);
}
@@ -2342,15 +2341,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,14 +2374,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC64_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
- kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kS390_Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
@@ -2393,14 +2391,14 @@ VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
#define SIMD_TYPES(V) \
@@ -2789,18 +2787,107 @@ void InstructionSelector::EmitPrepareResults(
}
void InstructionSelector::VisitLoadLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kS390_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kS390_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kS390_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kS390_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoadTransform(Node* node) {
- // We should never reach here, see http://crrev.com/c/2050811
- UNREACHABLE();
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ ArchOpcode opcode;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kS390_S128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kS390_S128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kS390_S128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kS390_S128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kS390_S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kS390_S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kS390_S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kS390_S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kS390_S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kS390_S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kS390_S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kS390_S128Load64Zero;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitLoad(node, node, opcode);
}
void InstructionSelector::VisitStoreLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kS390_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kS390_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kS390_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kS390_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
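
Several of the atomic hunks above pack an AtomicWidth into the InstructionCode next to the addressing mode via AtomicWidthField::encode(width). InstructionCode is a 32-bit word carved into bit fields; the sketch below re-creates the idea with made-up field positions (the real AddressingModeField/AtomicWidthField definitions live elsewhere in the backend and use different positions):

#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> kShift); }
};

enum AddressingMode { kMode_None = 0, kMode_MRI = 1, kMode_MRR = 2 };
enum AtomicWidth { kWord32 = 0, kWord64 = 1 };

using AddressingModeField = BitField<AddressingMode, 8, 5>;  // toy positions
using AtomicWidthField = BitField<AtomicWidth, 13, 2>;       // toy positions

int main() {
  uint32_t code = 42;  // stand-in for an ArchOpcode value in the low bits
  code |= AddressingModeField::encode(kMode_MRI);
  code |= AtomicWidthField::encode(kWord64);
  std::printf("mode=%d width=%d\n", AddressingModeField::decode(code),
              AtomicWidthField::decode(code));  // mode=1 width=1
  return 0;
}
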
diff --git a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
index 60a40fb4893..57e0143285c 100644
--- a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
+ MachineRepresentation rep) {
+ if (order == std::memory_order_relaxed) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgb(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgw(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgl(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgq(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->AtomicStoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
+ MachineRepresentation rep);
+
+template <>
+void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
+ Immediate value,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
#ifdef V8_IS_TSAN
-class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
+void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+ Register scratch, Operand operand,
+ StubCallMode mode, int size) {
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ // The wasm OOB trap handler needs to be able to look up the faulting
+ // instruction pointer to handle the SIGSEGV raised by an OOB access. It
+ // will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
+ // redundant load here to give the trap handler a chance to handle any
+ // OOB SIGSEGVs.
+ if (trap_handler::IsTrapHandlerEnabled() &&
+ mode == StubCallMode::kCallWasmRuntimeStub) {
+ switch (size) {
+ case kInt8Size:
+ tasm->movb(scratch, operand);
+ break;
+ case kInt16Size:
+ tasm->movw(scratch, operand);
+ break;
+ case kInt32Size:
+ tasm->movl(scratch, operand);
+ break;
+ case kInt64Size:
+ tasm->movq(scratch, operand);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+}
+
+class OutOfLineTSANStore : public OutOfLineCode {
public:
- OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
- Register scratch0, StubCallMode stub_mode, int size)
+ OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
+ Register scratch0, StubCallMode stub_mode, int size,
+ std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif // V8_ENABLE_WEBASSEMBLY
size_(size),
+ memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallWasmRuntimeStub);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallWasmRuntimeStub,
+ memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallBuiltinPointer);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif // V8_ENABLE_WEBASSEMBLY
int size_;
+ const std::memory_order memory_order_;
Zone* zone_;
};
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {
+void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+ Operand operand, Register value_reg,
+ X64OperandConverter& i, StubCallMode mode, int size,
+ std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
- auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
- codegen, operand, value_reg, scratch0, mode, size);
+ auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
+ scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+ X64OperandConverter& i) {
+ return value;
+}
+
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+ X64OperandConverter& i);
+
+template <>
+Register GetTSANValueRegister<std::memory_order_relaxed>(
+ TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
+ Register value_reg = i.TempRegister(1);
+ tasm->movq(value_reg, value);
+ return value_reg;
+}
+
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
-
- Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
- size);
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+ int size = ElementSizeInBytes(rep);
+ EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+ stub_call_mode, size);
+ Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
+ EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+ size, order);
+ } else {
+ EmitStore<order>(tasm, operand, value, rep);
+ }
}
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
@@ -453,10 +592,10 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
@@ -472,20 +611,20 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {}
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
+ DCHECK(order == std::memory_order_relaxed ||
+ order == std::memory_order_seq_cst);
+ EmitStore<order>(tasm, operand, value, rep);
+}
+
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {}
#endif // V8_IS_TSAN
#if V8_ENABLE_WEBASSEMBLY
@@ -554,7 +693,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
+ const MemoryAccessMode access_mode = instr->memory_access_mode();
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -564,21 +703,11 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+ DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->andq(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
@@ -871,24 +1000,32 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_PINSR(ASM_INSTR) \
- do { \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- XMMRegister dst = i.OutputSimd128Register(); \
- XMMRegister src = i.InputSimd128Register(0); \
- uint8_t laneidx = i.InputUint8(1); \
- if (HasAddressingMode(instr)) { \
- __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
- break; \
- } \
- if (instr->InputAt(2)->IsFPRegister()) { \
- __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
- __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
- } else if (instr->InputAt(2)->IsRegister()) { \
- __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
- } else { \
- __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
- } \
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ uint32_t load_offset; \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx, &load_offset); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx, &load_offset); \
+ } \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
+ } while (false)
+
+#define ASSEMBLE_SEQ_CST_STORE(rep) \
+ do { \
+ Register value = i.InputRegister(0); \
+ Operand operand = i.MemoryOperand(1); \
+ EmitTSANAwareStore<std::memory_order_seq_cst>( \
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
+ rep); \
} while (false)
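
ASSEMBLE_SEQ_CST_STORE and the EmitStore<order> paths earlier in the file hinge on one x64 detail: a relaxed store can remain a plain mov, while a sequentially consistent store needs a full barrier, which the code above obtains from an implicitly locked xchg rather than mov plus mfence. The same distinction in plain std::atomic terms (toy code, not V8's):

#include <atomic>
#include <cstdio>

std::atomic<int> flag{0};

void RelaxedPublish(int v) {
  // No ordering beyond atomicity; a plain mov suffices on x64.
  flag.store(v, std::memory_order_relaxed);
}

void SeqCstPublish(int v) {
  // Full fence required; mainstream x64 compilers typically lower this to xchg.
  flag.store(v, std::memory_order_seq_cst);
}

int main() {
  RelaxedPublish(1);
  SeqCstPublish(2);
  std::printf("%d\n", flag.load());
  return 0;
}
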
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1019,22 +1156,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(rbx);
- __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
- __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
- __ Move(rbx, -1);
- __ cmovq(equal, kSpeculationPoisonRegister, rbx);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
- __ andq(kContextRegister, kSpeculationPoisonRegister);
- __ andq(rsp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1052,11 +1173,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1078,19 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ Call(wasm_code, constant.rmode());
- }
+ __ Call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1107,12 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(kScratchRegister);
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1130,11 +1233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1147,11 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1210,7 +1305,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -1222,10 +1318,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters + num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters + num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -1265,13 +1361,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == rdx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -1344,7 +1440,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(result, result);
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1356,7 +1453,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ StoreTaggedField(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ } else {
+ DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
+ EmitTSANAwareStore<std::memory_order_seq_cst>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1364,14 +1470,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kX64MFence:
__ mfence();
break;
@@ -1646,22 +1746,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psrlq(tmp, byte{33});
- __ Andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psllq(tmp, byte{31});
- __ Xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Sqrt:
ASSEMBLE_SSE_UNOP(sqrtss);
break;
@@ -1858,16 +1942,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kX64F64x2Abs:
- case kSSEFloat64Abs: {
- __ Abspd(i.OutputDoubleRegister());
- break;
- }
- case kX64F64x2Neg:
- case kSSEFloat64Neg: {
- __ Negpd(i.OutputDoubleRegister());
- break;
- }
case kSSEFloat64Sqrt:
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
@@ -2120,56 +2194,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulsd depending on the result.
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 33);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Abs: {
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 31);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Neg: {
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 1);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Abs:
+ case kX64Float64Abs: {
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 63);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Neg:
+ case kX64Float64Neg: {
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kSSEFloat64SilenceNaN:
@@ -2180,24 +2224,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2205,29 +2245,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2237,7 +2274,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2245,16 +2281,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -2263,8 +2298,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
__ movl(i.OutputRegister(), i.InputRegister(0));
@@ -2278,48 +2313,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
@@ -2328,14 +2358,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
}
break;
}
@@ -2344,24 +2374,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2376,17 +2405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- // If we have to poison the loaded value, we load into a general
- // purpose register first, mask it with the poison, and move the
- // value from the general purpose register into the double register.
- __ movq(kScratchRegister, i.MemoryOperand());
- __ andq(kScratchRegister, kSpeculationPoisonRegister);
- __ Movq(i.OutputDoubleRegister(), kScratchRegister);
- } else {
- __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- }
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -2667,27 +2686,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2ConvertLowI32x4S: {
@@ -2696,11 +2703,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2ConvertLowI32x4U: {
__ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kX64F32x4DemoteF64x2Zero: {
@@ -2709,12 +2721,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4TruncSatF64x2SZero: {
__ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4TruncSatF64x2UZero: {
__ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64F32x4Splat: {
@@ -2813,42 +2827,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform minps in both orders, merge the results, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Minps(kScratchDoubleReg, dst);
- __ Minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform maxps in both orders, merge the results, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Maxps(kScratchDoubleReg, dst);
- __ Maxps(dst, src1);
- // Find discrepancies.
- __ Xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Eq: {
@@ -2868,27 +2853,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Pmin: {
@@ -2973,28 +2946,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = kScratchDoubleReg;
-
- __ Movdqa(tmp1, left);
- __ Movdqa(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, left);
-
- __ Paddq(tmp2, tmp1);
- __ Psllq(tmp2, byte{32});
-
- __ Pmuludq(left, right);
- __ Paddq(left, tmp2); // left == dst
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64I64x2Eq: {
@@ -3084,21 +3038,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4SConvertF32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- // NAN->0
- __ Movaps(kScratchDoubleReg, dst);
- __ Cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -3252,21 +3194,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- // pmaddwd multiplies signed words in src1 and src2, producing signed
- // doublewords, then adds pairwise.
- // src1 = |a|b|c|d|e|f|g|h|
- // src2 = |1|1|1|1|1|1|1|1|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- __ Pmaddwd(dst, src1, src2);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I32x4ExtAddPairwiseI16x8U: {
__ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64S128Const: {
@@ -3293,12 +3228,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
+ __ I16x8Splat(dst, i.InputRegister(0));
} else {
- __ Movd(dst, i.InputOperand(0));
+ __ I16x8Splat(dst, i.InputOperand(0));
}
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I16x8ExtractLaneS: {
@@ -3481,43 +3414,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ExtAddPairwiseI8x16S: {
__ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- __ Pmaddubsw(dst, src1, src2);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- if (HasRegisterInput(instr, 0)) {
- __ vmovd(kScratchDoubleReg, i.InputRegister(0));
- __ vpbroadcastb(dst, kScratchDoubleReg);
- } else {
- __ vpbroadcastb(dst, i.InputOperand(0));
- }
+ if (HasRegisterInput(instr, 0)) {
+ __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
} else {
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
- } else {
- __ Movd(dst, i.InputOperand(0));
- }
- __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
}
-
break;
}
case kX64Pextrb: {
@@ -3586,66 +3503,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- // Take shift value modulo 8.
- __ movq(tmp, i.InputRegister(1));
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): subq here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ subq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psllw(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
case kX64I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
-        // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3701,34 +3578,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- // Unpack the bytes into words, do logical shifts, and repack.
- DCHECK_EQ(dst, i.InputSimd128Register(0));
-      // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, byte{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3834,9 +3691,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- bool omit_add = MiscField::decode(instr->opcode());
__ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), omit_add);
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ kScratchRegister, MiscField::decode(instr->opcode()));
break;
}
case kX64I8x16Shuffle: {
@@ -3888,45 +3745,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Popcnt: {
__ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.TempSimd128Register(0));
+ i.TempSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastb(dst, i.MemoryOperand());
- } else {
- __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
- }
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastw(dst, i.MemoryOperand());
- } else {
- __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
- __ Pshuflw(dst, dst, uint8_t{0});
- __ Punpcklqdq(dst, dst);
- }
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
- } else {
- __ movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
- byte{0});
- }
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64S128Load64Splat: {
@@ -4049,10 +3886,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4070,10 +3907,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4232,156 +4069,180 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicStoreWord8: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
+ break;
+ }
+ case kAtomicStoreWord16: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
+ break;
+ }
+ case kAtomicStoreWord32: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
+ break;
+ }
+ case kX64Word64AtomicStoreWord64: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
+ break;
+ }
+ case kAtomicExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- break;
- }
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movsxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movzxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movsxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movzxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
- break;
- ATOMIC_BINOP_CASE(Add, addl)
- ATOMIC_BINOP_CASE(Sub, subl)
- ATOMIC_BINOP_CASE(And, andl)
- ATOMIC_BINOP_CASE(Or, orl)
- ATOMIC_BINOP_CASE(Xor, xorl)
-#undef ATOMIC_BINOP_CASE
- case kX64Word64AtomicExchangeUint8: {
- __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint16: {
- __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint32: {
- __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64) {
+ // Zero-extend the 32 bit value to 64 bit.
+ __ movl(rax, rax);
+ }
break;
}
case kX64Word64AtomicExchangeUint64: {
__ xchgq(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kX64Word64AtomicCompareExchangeUint8: {
- __ lock();
- __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint16: {
- __ lock();
- __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint32: {
- __ lock();
- __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- // Zero-extend the 32 bit value to 64 bit.
- __ movl(rax, rax);
- break;
- }
case kX64Word64AtomicCompareExchangeUint64: {
__ lock();
__ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kX64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
- __ movzxbq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
- __ movzxwq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
- break; \
- case kX64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movsxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movzxbl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movb, cmpxchgb); \
+ __ movzxbq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movsxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movzxwl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movw, cmpxchgw); \
+ __ movzxwq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movl, cmpxchgl); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movl, cmpxchgl); \
+ break; \
+ } \
+ break; \
+ case kX64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movq, cmpxchgq); \
break;
- ATOMIC64_BINOP_CASE(Add, addq)
- ATOMIC64_BINOP_CASE(Sub, subq)
- ATOMIC64_BINOP_CASE(And, andq)
- ATOMIC64_BINOP_CASE(Or, orq)
- ATOMIC64_BINOP_CASE(Xor, xorq)
-#undef ATOMIC64_BINOP_CASE
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ ATOMIC_BINOP_CASE(Add, addl, addq)
+ ATOMIC_BINOP_CASE(Sub, subl, subq)
+ ATOMIC_BINOP_CASE(And, andl, andq)
+ ATOMIC_BINOP_CASE(Or, orl, orq)
+ ATOMIC_BINOP_CASE(Xor, xorl, xorq)
+#undef ATOMIC_BINOP_CASE
+
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
}
return kSuccess;
@@ -4407,6 +4268,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
+#undef ASSEMBLE_SEQ_CST_STORE
namespace {
@@ -4462,19 +4324,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ Move(kScratchRegister, 0);
- __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
- kScratchRegister);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -4716,7 +4565,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4876,18 +4724,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
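The AssembleReturn hunk above keys the arity check on whether the receiver is already counted in the argument-count register. A minimal standalone sketch of that threshold choice, reusing the names that appear in the diff (kJSArgcIncludesReceiver, parameter_slots) but otherwise hypothetical and not part of the patch:

// Sketch only: mirrors the comparison emitted in AssembleReturn. With the
// receiver included in argc, the mismatch check compares against all
// parameter slots; otherwise it compares against parameter_slots - 1.
constexpr bool kJSArgcIncludesReceiver = true;  // assumed build-time constant

inline int ReturnArgcThreshold(int parameter_slots) {
  return kJSArgcIncludesReceiver ? parameter_slots : parameter_slots - 1;
}

// The mismatch return path (DropArguments + Ret) is taken when the value in
// argc_reg exceeds this threshold; the fast path pops a fixed slot count.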
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
index eba23dcfa92..ad9906585ce 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,413 +11,394 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint8) \
- V(X64Word64AtomicAddUint16) \
- V(X64Word64AtomicAddUint32) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint8) \
- V(X64Word64AtomicSubUint16) \
- V(X64Word64AtomicSubUint32) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint8) \
- V(X64Word64AtomicAndUint16) \
- V(X64Word64AtomicAndUint32) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint8) \
- V(X64Word64AtomicOrUint16) \
- V(X64Word64AtomicOrUint32) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint8) \
- V(X64Word64AtomicXorUint16) \
- V(X64Word64AtomicXorUint32) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicExchangeUint8) \
- V(X64Word64AtomicExchangeUint16) \
- V(X64Word64AtomicExchangeUint32) \
- V(X64Word64AtomicExchangeUint64) \
- V(X64Word64AtomicCompareExchangeUint8) \
- V(X64Word64AtomicCompareExchangeUint16) \
- V(X64Word64AtomicCompareExchangeUint32) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64Movb) \
+ V(X64Movdqu) \
+ V(X64Movl) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movsxbl) \
+ V(X64Movsxbq) \
+ V(X64Movsxlq) \
+ V(X64Movsxwl) \
+ V(X64Movsxwq) \
+ V(X64Movw) \
+ V(X64Movzxbl) \
+ V(X64Movzxbq) \
+ V(X64Movzxwl) \
+ V(X64Movzxwq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64Pinsrb) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pinsrw) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
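The two opcode lists above are plain X-macros: TARGET_ARCH_OPCODE_LIST(V) prepends the memory-access-mode sublist and is then expanded once to declare the opcode enum and again wherever a per-opcode table is needed, so the lists and the tables cannot drift apart. A minimal standalone sketch of that pattern, with placeholder names rather than real V8 opcodes:

#include <cstdio>

#define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(DemoMovl)                                       \
  V(DemoMovq)

#define DEMO_OPCODE_LIST(V)                   \
  DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(DemoAdd)                                  \
  V(DemoSub)

// First expansion: the enum of opcodes.
enum DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kDemoOpcodeCount
};

// Second expansion: a parallel name table that stays in sync by construction.
const char* const kDemoOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    DEMO_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};

int main() {
  for (int i = 0; i < kDemoOpcodeCount; ++i)
    std::printf("%2d %s\n", i, kDemoOpcodeNames[i]);
  return 0;
}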
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 4fada93a312..d5f33d86bc1 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -62,8 +62,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat32ToFloat64:
@@ -73,8 +71,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32Max:
@@ -114,10 +110,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
case kX64BitcastFI:
case kX64BitcastDL:
case kX64BitcastIF:
@@ -422,33 +418,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
- case kX64Word64AtomicAddUint8:
- case kX64Word64AtomicAddUint16:
- case kX64Word64AtomicAddUint32:
+ case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
- case kX64Word64AtomicSubUint8:
- case kX64Word64AtomicSubUint16:
- case kX64Word64AtomicSubUint32:
case kX64Word64AtomicSubUint64:
- case kX64Word64AtomicAndUint8:
- case kX64Word64AtomicAndUint16:
- case kX64Word64AtomicAndUint32:
case kX64Word64AtomicAndUint64:
- case kX64Word64AtomicOrUint8:
- case kX64Word64AtomicOrUint16:
- case kX64Word64AtomicOrUint32:
case kX64Word64AtomicOrUint64:
- case kX64Word64AtomicXorUint8:
- case kX64Word64AtomicXorUint16:
- case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
- case kX64Word64AtomicExchangeUint8:
- case kX64Word64AtomicExchangeUint16:
- case kX64Word64AtomicExchangeUint32:
case kX64Word64AtomicExchangeUint64:
- case kX64Word64AtomicCompareExchangeUint8:
- case kX64Word64AtomicCompareExchangeUint16:
- case kX64Word64AtomicCompareExchangeUint32:
case kX64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -472,18 +448,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
return 3;
case kSSEFloat32Mul:
case kSSEFloat32ToFloat64:
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 53ee75064bb..c477c44b07d 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -16,6 +16,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
#include "src/roots/roots-inl.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -250,6 +251,7 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
+
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
@@ -340,6 +342,30 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
UNREACHABLE();
}
+ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ case MachineRepresentation::kWord64:
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -351,9 +377,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -471,9 +497,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= AccessModeField::encode(kMemoryAccessProtected);
- } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@@ -484,19 +507,39 @@ void InstructionSelector::VisitLoad(Node* node) {
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitStore(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+
+// Shared routine for Word32/Word64 Atomic Exchange
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ X64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
@@ -513,16 +556,13 @@ void InstructionSelector::VisitStore(Node* node) {
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- if ((ElementSizeLog2Of(store_rep.representation()) <
- kSystemPointerSizeLog2) &&
- value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
- value = value->InputAt(0);
- }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
@@ -536,22 +576,54 @@ void InstructionSelector::VisitStore(Node* node) {
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
+ // Release and non-atomic stores emit MOV and sequentially consistent stores
+ // emit XCHG.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
+ ArchOpcode opcode;
+ AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
- node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
+
+ if (is_seqcst) {
+ // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+ // would for XCHG. XCHG can't encode the value as an immediate and has
+ // fewer addressing modes available.
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] =
+ g.GetEffectiveIndexOperand(index, &addressing_mode);
+ opcode = GetSeqCstStoreOpcode(store_rep);
+ } else {
+ if ((ElementSizeLog2Of(store_rep.representation()) <
+ kSystemPointerSizeLog2) &&
+ value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ value = value->InputAt(0);
+ }
+
+ addressing_mode = g.GetEffectiveAddressMemoryOperand(
+ node, inputs, &input_count, reg_kind);
+ InstructionOperand value_operand = g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : g.UseRegister(value, reg_kind);
+ inputs[input_count++] = value_operand;
+ opcode = GetStoreOpcode(store_rep);
+ }
+
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs, temp_count, temps);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs, temp_count, temps);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
@@ -935,15 +1007,15 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
kPositiveDisplacement);
return;
} else {
- Int64BinopMatcher m(node);
- if ((m.left().IsChangeInt32ToInt64() ||
- m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
+ Int64BinopMatcher bm(node);
+ if ((bm.left().IsChangeInt32ToInt64() ||
+ bm.left().IsChangeUint32ToUint64()) &&
+ bm.right().IsInRange(32, 63)) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kX64Shl, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()->InputAt(0)),
- g.UseImmediate(m.right().node()));
+ g.UseRegister(bm.left().node()->InputAt(0)),
+ g.UseImmediate(bm.right().node()));
return;
}
}
@@ -1502,8 +1574,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad: {
+ case IrOpcode::kProtectedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1622,15 +1693,12 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
@@ -1770,7 +1838,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Abs);
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1814,7 +1882,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Abs);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
@@ -1822,11 +1890,11 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Neg);
}
void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Neg);
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -2294,7 +2362,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
// Shared routine for Word32/Word64 Atomic Binops
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2305,14 +2373,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
// Shared routine for Word32/Word64 Atomic CmpExchg
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2324,23 +2393,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-// Shared routine for Word32/Word64 Atomic Exchange
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegister(value), g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
@@ -2381,19 +2435,19 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
// Try to combine the branch with a comparison.
- Node* const user = m.node();
- Node* const value = m.left().node();
- if (CanCover(user, value)) {
- switch (value->opcode()) {
+ Node* const eq_user = m.node();
+ Node* const eq_value = m.left().node();
+ if (CanCover(eq_user, eq_value)) {
+ switch (eq_value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, cont);
+ return VisitWordCompare(this, eq_value, kX64Cmp, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kX64Test, cont);
+ return VisitWordCompare(this, eq_value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(this, user, value, kX64Cmp, cont);
+ return VisitCompareZero(this, eq_user, eq_value, kX64Cmp, cont);
}
return VisitWord64EqualImpl(this, value, cont);
}
@@ -2711,131 +2765,114 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ DCHECK(IsIntegral(load_rep.representation()) ||
+ IsAnyTagged(load_rep.representation()) ||
+ (COMPRESS_POINTERS_BOOL &&
+ CanBeCompressedPointer(load_rep.representation())));
+ DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
+ DCHECK(!load_rep.IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ DCHECK(!atomic_load_params.representation().IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 4);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX64Word64AtomicExchangeUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Word64AtomicExchangeUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Word64AtomicExchangeUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kX64Word64AtomicExchangeUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 8);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -2856,15 +2893,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2889,14 +2925,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
- kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kX64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3005,7 +3041,6 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
@@ -3053,6 +3088,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_NARROW_SHIFT_OPCODES(V) \
V(I8x16Shl) \
+ V(I8x16ShrS) \
V(I8x16ShrU)
void InstructionSelector::VisitS128Const(Node* node) {
@@ -3182,19 +3218,19 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)), \
- arraysize(temps), temps); \
- } else { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand output = \
+ IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, output, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, output, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
#undef VISIT_SIMD_NARROW_SHIFT
@@ -3257,15 +3293,11 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
}
void InstructionSelector::VisitF64x2Abs(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Abs);
}
void InstructionSelector::VisitF64x2Neg(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Neg);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -3274,12 +3306,11 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_QFMOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.UseRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); \
}
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
@@ -3321,7 +3352,8 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
+ Emit(kX64I32x4SConvertF32x4,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -3333,19 +3365,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- X64OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
- } else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- }
-}
-
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -3823,6 +3842,26 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
}
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionCode code = kX64F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ if (m.ResolvedValue().kind == MemoryAccessKind::kProtected) {
+ code |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+ // LoadTransforms cannot be eliminated, so they are visited even if
+ // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
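The store and load comments in the hunks above lean on the standard C++11-to-x86 mapping (https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html). The same mapping can be observed with plain std::atomic, independent of V8: compiling this standalone snippet with -O2 and inspecting the assembly shows MOV for the release store and for both loads, but XCHG (or MOV plus MFENCE) for the seq_cst store.

#include <atomic>

std::atomic<int> guard{0};

void StoreRelease(int v) {
  guard.store(v, std::memory_order_release);  // plain MOV on x64
}

void StoreSeqCst(int v) {
  guard.store(v, std::memory_order_seq_cst);  // XCHG (or MOV + MFENCE)
}

int LoadAcquire() {
  return guard.load(std::memory_order_acquire);  // plain MOV
}

int LoadSeqCst() {
  return guard.load(std::memory_order_seq_cst);  // also a plain MOV on x64
}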
diff --git a/chromium/v8/src/compiler/branch-elimination.cc b/chromium/v8/src/compiler/branch-elimination.cc
index a864012a7a6..d2fce8a2768 100644
--- a/chromium/v8/src/compiler/branch-elimination.cc
+++ b/chromium/v8/src/compiler/branch-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/branch-elimination.h"
#include "src/base/small-vector.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -14,12 +15,15 @@ namespace internal {
namespace compiler {
BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
- Zone* zone, Phase phase)
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ Phase phase)
: AdvancedReducer(editor),
jsgraph_(js_graph),
node_conditions_(js_graph->graph()->NodeCount(), zone),
reduced_(js_graph->graph()->NodeCount(), zone),
zone_(zone),
+ source_positions_(source_positions),
dead_(js_graph->Dead()),
phase_(phase) {}
@@ -135,7 +139,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
bool condition_value;
// If we know the condition we can discard the branch.
if (from_input.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -159,6 +162,72 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return TakeConditionsFromFirstControl(node);
}
+// Simplify a trap following a merge.
+// Assuming condition is in control1's path conditions, and !condition is in
+// control2's path conditions, the following transformation takes place:
+//
+// control1 control2 condition effect1
+// \ / \ / |
+// Merge X | control1
+// | / \ | /
+// effect1 effect2 | | TrapIf control2
+// \ | /| ==> | \ /
+// EffectPhi | | effect2 Merge
+// | / | | /
+// condition | / \ | /
+// \ | / EffectPhi
+// TrapIf
+// TODO(manoskouk): We require that the trap's effect input is the Merge's
+// EffectPhi, so we can ensure that the new traps' effect inputs are not
+// dominated by the Merge. Can we relax this?
+bool BranchElimination::TryPullTrapIntoMerge(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+ Node* merge = NodeProperties::GetControlInput(node);
+ DCHECK_EQ(merge->opcode(), IrOpcode::kMerge);
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ Node* effect_input = NodeProperties::GetEffectInput(node);
+ if (!(effect_input->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect_input) == merge)) {
+ return false;
+ }
+
+ bool trapping_condition = node->opcode() == IrOpcode::kTrapIf;
+ base::SmallVector<Node*, 8> new_merge_inputs;
+ for (Edge edge : merge->input_edges()) {
+ Node* input = edge.to();
+ ControlPathConditions from_input = node_conditions_.Get(input);
+ Node* previous_branch;
+ bool condition_value;
+ if (!from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
+ return false;
+ }
+ if (condition_value == trapping_condition) {
+ Node* inputs[] = {
+ condition, NodeProperties::GetEffectInput(effect_input, edge.index()),
+ input};
+ Node* trap_clone = graph()->NewNode(node->op(), 3, inputs);
+ if (source_positions_) {
+ source_positions_->SetSourcePosition(
+ trap_clone, source_positions_->GetSourcePosition(node));
+ }
+ new_merge_inputs.emplace_back(trap_clone);
+ } else {
+ new_merge_inputs.emplace_back(input);
+ }
+ }
+
+ for (int i = 0; i < merge->InputCount(); i++) {
+ merge->ReplaceInput(i, new_merge_inputs[i]);
+ }
+ ReplaceWithValue(node, dead(), dead(), merge);
+ node->Kill();
+ Revisit(merge);
+
+ return true;
+}
+
Reduction BranchElimination::ReduceTrapConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kTrapIf ||
node->opcode() == IrOpcode::kTrapUnless);
@@ -168,17 +237,59 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) {
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (!reduced_.Get(control_input)) {
- return NoChange();
+ if (!reduced_.Get(control_input)) return NoChange();
+
+ // If the trap comes directly after a merge, pull it into the merge. This will
+ // unlock other optimizations later.
+ if (control_input->opcode() == IrOpcode::kMerge &&
+ TryPullTrapIntoMerge(node)) {
+ return Replace(dead());
}
+
ControlPathConditions from_input = node_conditions_.Get(control_input);
- Node* branch;
+ Node* previous_branch;
bool condition_value;
- if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+ if (from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
if (condition_value == trapping_condition) {
- // This will always trap. Mark its outputs as dead and connect it to
- // graph()->end().
+ // Special case: Trap directly inside a branch without sibling nodes.
+ // Replace the branch with the trap.
+ // condition control condition control
+ // | \ / \ /
+ // | Branch TrapIf
+ // | / \ ==> |
+ // | IfTrue IfFalse <subgraph2>
+ // | / |
+// TrapIf <subgraph2> Dead
+ // | |
+ // <subgraph1> <subgraph1>
+ // (and symmetrically for TrapUnless.)
+ if ((control_input->opcode() == IrOpcode::kIfTrue ||
+ control_input->opcode() == IrOpcode::kIfFalse) &&
+ control_input->UseCount() == 1) {
+ Node* branch = NodeProperties::GetControlInput(control_input);
+ DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
+ if (condition == NodeProperties::GetValueInput(branch, 0)) {
+ Node* other_if_branch = nullptr;
+ for (Node* use : branch->uses()) {
+ if (use != control_input) other_if_branch = use;
+ }
+ DCHECK_NOT_NULL(other_if_branch);
+
+ node->ReplaceInput(NodeProperties::FirstControlIndex(node),
+ NodeProperties::GetControlInput(branch));
+ ReplaceWithValue(node, dead(), dead(), dead());
+ ReplaceWithValue(other_if_branch, node, node, node);
+ other_if_branch->Kill();
+ control_input->Kill();
+ branch->Kill();
+ return Changed(node);
+ }
+ }
+
+ // General case: This will always trap. Mark its outputs as dead and
+ // connect it to graph()->end().
ReplaceWithValue(node, dead(), dead(), dead());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = graph()->NewNode(common()->Throw(), effect, node);
@@ -215,7 +326,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* branch;
// If we know the condition we can discard the branch.
if (conditions.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
@@ -410,21 +520,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
}
#endif
-void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
- // Check if {branch} is dead because we might have a stale side-table entry.
- if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
- branch->opcode() != IrOpcode::kTrapIf &&
- branch->opcode() != IrOpcode::kTrapUnless) {
- IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
- IsSafetyCheck combined_safety =
- CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
- if (branch_safety != combined_safety) {
- NodeProperties::ChangeOp(
- branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety));
- }
- }
-}
-
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }
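As a rough source-level analogy for TryPullTrapIntoMerge (not compiler IR, just the intuition): when the trap's condition is already decided on every path entering the merge, the trap can be cloned into the predecessors where it must fire, after which the merge and the branch around it become removable.

#include <cstdlib>

// Before: the trap sits after the "merge" of the two paths, guarded by a
// condition that is known to be true on one incoming path and false on the
// other.
int Before(bool c, int a, int b) {
  int merged = c ? a : b;
  if (c) std::abort();  // TrapIf(c) after the merge
  return merged;
}

// After: the trap is pulled into the path where it must fire; the surviving
// path no longer needs the branch or the merge at all.
int After(bool c, int a, int b) {
  if (c) std::abort();  // trap cloned into the trapping predecessor
  (void)a;
  return b;
}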
diff --git a/chromium/v8/src/compiler/branch-elimination.h b/chromium/v8/src/compiler/branch-elimination.h
index 9078c390381..7964e0a1b94 100644
--- a/chromium/v8/src/compiler/branch-elimination.h
+++ b/chromium/v8/src/compiler/branch-elimination.h
@@ -19,6 +19,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
+class SourcePositionTable;
class V8_EXPORT_PRIVATE BranchElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -28,7 +29,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
kLATE,
};
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone,
- Phase phase = kLATE);
+ SourcePositionTable* source_positions, Phase phase = kLATE);
~BranchElimination() final;
const char* reducer_name() const override { return "BranchElimination"; }
@@ -108,13 +109,13 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceStart(Node* node);
Reduction ReduceOtherControl(Node* node);
void SimplifyBranchCondition(Node* branch);
+ bool TryPullTrapIntoMerge(Node* node);
Reduction TakeConditionsFromFirstControl(Node* node);
Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
bool is_true_branch, bool in_new_block);
- void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node);
Node* dead() const { return dead_; }
Graph* graph() const;
@@ -132,6 +133,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
node_conditions_;
NodeAuxData<bool> reduced_;
Zone* zone_;
+ SourcePositionTable* source_positions_;
Node* dead_;
Phase phase_;
};
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc
index 985a256c57d..019f0bc9540 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.cc
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc
@@ -141,9 +141,8 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
- return NewNode(common()->Branch(hint, is_safety_check), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -1053,7 +1052,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
shared_info_(shared_info),
bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().value()),
+ feedback_vector_(feedback_cell.feedback_vector().value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
diff --git a/chromium/v8/src/compiler/c-linkage.cc b/chromium/v8/src/compiler/c-linkage.cc
index 59505411115..95a84ceeabd 100644
--- a/chromium/v8/src/compiler/c-linkage.cc
+++ b/chromium/v8/src/compiler/c-linkage.cc
@@ -66,6 +66,8 @@ namespace {
// == arm64 ====================================================================
// ===========================================================================
#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTER d0
#define CALLEE_SAVE_REGISTERS \
(1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
(1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
@@ -100,6 +102,18 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == loong64 ================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit() | s8.bit() | fp.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
+ f30.bit() | f31.bit()
+
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/chromium/v8/src/compiler/code-assembler.cc b/chromium/v8/src/compiler/code-assembler.cc
index 2cbcce236fa..a723d21a10e 100644
--- a/chromium/v8/src/compiler/code-assembler.cc
+++ b/chromium/v8/src/compiler/code-assembler.cc
@@ -48,8 +48,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ CodeKind kind, const char* name, Builtin builtin)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, CodeKind kind,
- const char* name,
- PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ const char* name, Builtin builtin)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
CallDescriptor::kCanUseRoots),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin)
: raw_assembler_(new RawMachineAssembler(
isolate, zone->New<Graph>(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements(), poisoning_level)),
+ InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
builtin_(builtin),
@@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
-PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
- return raw_assembler()->poisoning_level();
-}
-
// static
Handle<Code> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
@@ -187,7 +179,7 @@ Handle<Code> CodeAssembler::GenerateCode(
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
rasm->source_positions(), state->kind_, state->name_,
- state->builtin_, rasm->poisoning_level(), options, profile_data)
+ state->builtin_, options, profile_data)
.ToHandleChecked();
state->code_generated_ = true;
@@ -253,9 +245,9 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
base::EmbeddedVector<char, 1024> message;
SNPrintF(message, "no Object: %s", location);
TNode<String> message_node = StringConstant(message.begin());
- // This somewhat misuses the AbortCSAAssert runtime function. This will print
- // "abort: CSA_ASSERT failed: <message>", which is good enough.
- AbortCSAAssert(message_node);
+ // This somewhat misuses the AbortCSADcheck runtime function. This will print
+ // "abort: CSA_DCHECK failed: <message>", which is good enough.
+ AbortCSADcheck(message_node);
Unreachable();
Bind(&ok);
}
@@ -511,8 +503,8 @@ void CodeAssembler::ReturnIf(TNode<BoolT> condition, TNode<Object> value) {
Bind(&if_continue);
}
-void CodeAssembler::AbortCSAAssert(Node* message) {
- raw_assembler()->AbortCSAAssert(message);
+void CodeAssembler::AbortCSADcheck(Node* message) {
+ raw_assembler()->AbortCSADcheck(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
@@ -565,15 +557,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
- return UncheckedCast<Object>(
- raw_assembler()->TaggedPoisonOnSpeculation(value));
-}
-
-TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
- return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
-}
-
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
@@ -677,45 +660,44 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base) {
+ return raw_assembler()->Load(type, base);
}
-Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, offset, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
+ return raw_assembler()->Load(type, base, offset);
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
- LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
+ return BitcastWordToTagged(Load<RawPtrT>(base));
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
-Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
- TNode<WordT> offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return raw_assembler()->AtomicLoad(type, base, offset);
+ return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
+ offset);
}
template <class Type>
-TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
+ TNode<RawPtrT> base,
TNode<WordT> offset) {
- return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+ return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
+ AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset) {
@@ -880,16 +862,22 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
BitcastTaggedToWord(tagged_value));
}
-void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
+void CodeAssembler::AtomicStore(MachineRepresentation rep,
+ AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- raw_assembler()->AtomicStore(rep, base, offset, value);
+ raw_assembler()->AtomicStore(
+ AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value);
}
-void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value,
+void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
TNode<UintPtrT> value_high) {
- raw_assembler()->AtomicStore64(base, offset, value, value_high);
+ raw_assembler()->AtomicStore64(
+ AtomicStoreParameters(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value, value_high);
}
#define ATOMIC_FUNCTION(name) \
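The hunks above make the memory ordering of CodeAssembler::AtomicLoad/AtomicStore explicit: callers now pass an AtomicMemoryOrder, which is wrapped into AtomicLoadParameters/AtomicStoreParameters before reaching the raw assembler. A minimal standalone sketch of the same calling pattern, using std::atomic instead of V8's raw assembler; the mapping of the enum values to std::memory_order is an assumption of this sketch, not taken from the diff.

// sketch_atomic_order.cc -- standalone illustration, not V8 code.
#include <atomic>
#include <cstdint>
#include <iostream>

enum class AtomicMemoryOrder { kAcqRel, kSeqCst };

// Load with an explicitly chosen ordering, mirroring the new parameter above.
uint64_t AtomicLoad64(const std::atomic<uint64_t>& cell, AtomicMemoryOrder order) {
  return cell.load(order == AtomicMemoryOrder::kAcqRel
                       ? std::memory_order_acquire
                       : std::memory_order_seq_cst);
}

// Store with an explicitly chosen ordering and no write barrier involved.
void AtomicStore64(std::atomic<uint64_t>& cell, AtomicMemoryOrder order,
                   uint64_t value) {
  cell.store(value, order == AtomicMemoryOrder::kAcqRel
                        ? std::memory_order_release
                        : std::memory_order_seq_cst);
}

int main() {
  std::atomic<uint64_t> cell{0};
  AtomicStore64(cell, AtomicMemoryOrder::kSeqCst, 42);
  std::cout << AtomicLoad64(cell, AtomicMemoryOrder::kAcqRel) << "\n";  // 42
}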
diff --git a/chromium/v8/src/compiler/code-assembler.h b/chromium/v8/src/compiler/code-assembler.h
index 0e6872aa66e..fcef5bdd72e 100644
--- a/chromium/v8/src/compiler/code-assembler.h
+++ b/chromium/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
@@ -630,7 +631,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnIf(TNode<BoolT> condition, TNode<Object> value);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
@@ -725,47 +726,36 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadFramePointer();
TNode<RawPtrT> LoadParentFramePointer();
- // Poison |value| on speculative paths.
- TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
- TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
-
// Load raw memory location.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base);
template <class Type>
TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
- Node* Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base, Node* offset);
template <class Type>
- TNode<Type> Load(Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, needs_poisoning));
+ TNode<Type> Load(Node* base) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base));
}
template <class Type>
- TNode<Type> Load(Node* base, TNode<WordT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
+ TNode<Type> Load(Node* base, TNode<WordT> offset) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+ TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset) {
return UncheckedCast<Type>(
- AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+ AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
+ TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
- TNode<Object> LoadFullTagged(
- Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- TNode<Object> LoadFullTagged(
- Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<Object> LoadFullTagged(Node* base);
+ TNode<Object> LoadFullTagged(Node* base, TNode<IntPtrT> offset);
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
@@ -822,12 +812,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<HeapObject> object,
int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
- void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
- TNode<WordT> offset, TNode<Word32T> value);
+ void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset,
+ TNode<Word32T> value);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
- void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value, TNode<UintPtrT> value_high);
+ void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
+ TNode<UintPtrT> value_high);
TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
TNode<UintPtrT> offset, TNode<Word32T> value);
@@ -1225,7 +1217,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
@@ -1235,7 +1227,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* ConstructJSWithTarget(Callable const& callable, Node* context,
Node* function, Node* new_target, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<Code> target = HeapConstant(callable.code());
@@ -1312,7 +1304,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
- PoisoningMitigationLevel poisoning_level() const;
bool IsJSFunctionCall() const;
@@ -1367,7 +1358,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
- Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+ Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset);
Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset);
@@ -1595,13 +1587,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin = Builtin::kNoBuiltinId);
+ const char* name, Builtin builtin = Builtin::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
~CodeAssemblerState();
@@ -1628,8 +1618,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin);
+ const char* name, Builtin builtin);
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
void PopExceptionHandler();
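In the CallJS/ConstructJSWithTarget templates above, the arity handed to the call stub is now computed with JSParameterCount, so the receiver slot is counted whenever the build-time constant kJSArgcIncludesReceiver is set. A standalone sketch of that arithmetic; the constant values below are chosen for illustration only and the names mirror the identifiers used in the diff.

// sketch_js_parameter_count.cc -- standalone illustration, not V8 code.
#include <iostream>

constexpr bool kJSArgcIncludesReceiver = true;  // assumed true for this sketch
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// Number of parameter slots for a JS call with the given explicit arguments.
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}

int main() {
  // A call f(a, b) has two explicit arguments; the arity passed to the stub
  // also counts the receiver slot when the flag is set.
  std::cout << JSParameterCount(2) << "\n";  // 3
}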
diff --git a/chromium/v8/src/compiler/common-operator.cc b/chromium/v8/src/compiler/common-operator.cc
index b370a673b96..329ccc7e86d 100644
--- a/chromium/v8/src/compiler/common-operator.cc
+++ b/chromium/v8/src/compiler/common-operator.cc
@@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
- switch (is_safety_check) {
- case IsSafetyCheck::kCriticalSafetyCheck:
- return os << "CriticalSafetyCheck";
- case IsSafetyCheck::kSafetyCheck:
- return os << "SafetyCheck";
- case IsSafetyCheck::kNoSafetyCheck:
- return os << "NoSafetyCheck";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
-std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
- return os << info.hint << ", " << info.is_safety_check;
-}
-
-const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchOperatorInfo>(op);
-}
-
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
- case IrOpcode::kBranch:
- return BranchOperatorInfoOf(op).hint;
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
+ case IrOpcode::kBranch:
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
@@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.is_safety_check() == rhs.is_safety_check();
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
FeedbackSource::Hash feebdack_hash;
- return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()),
- p.is_safety_check());
+ return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check()
- << ", " << p.feedback();
+ return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
- if (op->opcode() == IrOpcode::kBranch) {
- return BranchOperatorInfoOf(op).is_safety_check;
- }
- return DeoptimizeParametersOf(op).is_safety_check();
-}
-
-const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
- const Operator* op, IsSafetyCheck safety_check) {
- if (op->opcode() == IrOpcode::kBranch) {
- BranchOperatorInfo info = BranchOperatorInfoOf(op);
- if (info.is_safety_check == safety_check) return op;
- return Branch(info.hint, safety_check);
- }
- DeoptimizeParameters p = DeoptimizeParametersOf(op);
- if (p.is_safety_check() == safety_check) return op;
- switch (op->opcode()) {
- case IrOpcode::kDeoptimizeIf:
- return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check);
- case IrOpcode::kDeoptimizeUnless:
- return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check);
- default:
- UNREACHABLE();
- }
-}
-
const Operator* CommonOperatorBuilder::DelayedStringConstant(
const StringConstantBase* str) {
return zone()->New<Operator1<const StringConstantBase*>>(
@@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
-#define CACHED_BRANCH_LIST(V) \
- V(None, CriticalSafetyCheck) \
- V(True, CriticalSafetyCheck) \
- V(False, CriticalSafetyCheck) \
- V(None, SafetyCheck) \
- V(True, SafetyCheck) \
- V(False, SafetyCheck) \
- V(None, NoSafetyCheck) \
- V(True, NoSafetyCheck) \
- V(False, NoSafetyCheck)
+#define CACHED_BRANCH_LIST(V) \
+ V(None) \
+ V(True) \
+ V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero, NoSafetyCheck) \
- V(Eager, DivisionByZero, SafetyCheck) \
- V(Eager, Hole, NoSafetyCheck) \
- V(Eager, Hole, SafetyCheck) \
- V(Eager, MinusZero, NoSafetyCheck) \
- V(Eager, MinusZero, SafetyCheck) \
- V(Eager, Overflow, NoSafetyCheck) \
- V(Eager, Overflow, SafetyCheck) \
- V(Eager, Smi, SafetyCheck)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision, NoSafetyCheck) \
- V(Eager, LostPrecision, SafetyCheck) \
- V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
- V(Eager, LostPrecisionOrNaN, SafetyCheck) \
- V(Eager, NotAHeapNumber, SafetyCheck) \
- V(Eager, NotANumberOrOddball, SafetyCheck) \
- V(Eager, NotASmi, SafetyCheck) \
- V(Eager, OutOfBounds, SafetyCheck) \
- V(Eager, WrongInstanceType, SafetyCheck) \
- V(Eager, WrongMap, SafetyCheck)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero) \
+ V(Eager, Hole) \
+ V(Eager, MinusZero) \
+ V(Eager, Overflow) \
+ V(Eager, Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision) \
+ V(Eager, LostPrecisionOrNaN) \
+ V(Eager, NotAHeapNumber) \
+ V(Eager, NotANumberOrOddball) \
+ V(Eager, NotASmi) \
+ V(Eager, OutOfBounds) \
+ V(Eager, WrongInstanceType) \
+ V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
@@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint hint, IsSafetyCheck is_safety_check>
- struct BranchOperator final : public Operator1<BranchOperatorInfo> {
+ template <BranchHint hint>
+ struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchOperatorInfo>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- BranchOperatorInfo{hint, is_safety_check}) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ hint) {} // parameter
};
-#define CACHED_BRANCH(Hint, IsCheck) \
- BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
- kBranch##Hint##IsCheck##Operator;
+#define CACHED_BRANCH(Hint) \
+ BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- IsSafetyCheck::kNoSafetyCheck)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
+ DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final {
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
- FeedbackSource(),
- IsSafetyCheck::kCriticalSafetyCheck)) {}
+ FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
@@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
- IsSafetyCheck is_safety_check) {
-#define CACHED_BRANCH(Hint, IsCheck) \
- if (hint == BranchHint::k##Hint && \
- is_safety_check == IsSafetyCheck::k##IsCheck) { \
- return &cache_.kBranch##Hint##IsCheck##Operator; \
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+#define CACHED_BRANCH(Hint) \
+ if (hint == BranchHint::k##Hint) { \
+ return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback,
- IsSafetyCheck::kNoSafetyCheck);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
return OpParameter<FrameStateInfo>(op);
}
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) {
- if (a == IsSafetyCheck::kCriticalSafetyCheck ||
- b == IsSafetyCheck::kCriticalSafetyCheck) {
- return IsSafetyCheck::kCriticalSafetyCheck;
- }
- if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) {
- return IsSafetyCheck::kSafetyCheck;
- }
- return IsSafetyCheck::kNoSafetyCheck;
-}
-
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
diff --git a/chromium/v8/src/compiler/common-operator.h b/chromium/v8/src/compiler/common-operator.h
index fa49d3b9920..f691c1fbf46 100644
--- a/chromium/v8/src/compiler/common-operator.h
+++ b/chromium/v8/src/compiler/common-operator.h
@@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
-enum class IsSafetyCheck : uint8_t {
- kCriticalSafetyCheck,
- kSafetyCheck,
- kNoSafetyCheck
-};
-
-// Get the more critical safety check of the two arguments.
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
-inline size_t hash_value(IsSafetyCheck is_safety_check) {
- return static_cast<size_t>(is_safety_check);
-}
-
enum class TrapId : uint32_t {
#define DEF_ENUM(Name, ...) k##Name,
FOREACH_WASM_TRAPREASON(DEF_ENUM)
@@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
-struct BranchOperatorInfo {
- BranchHint hint;
- IsSafetyCheck is_safety_check;
-};
-
-inline size_t hash_value(const BranchOperatorInfo& info) {
- return base::hash_combine(info.hint, info.is_safety_check);
-}
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
-
-inline bool operator==(const BranchOperatorInfo& a,
- const BranchOperatorInfo& b) {
- return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
-}
-
-V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
- const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check)
- : kind_(kind),
- reason_(reason),
- feedback_(feedback),
- is_safety_check_(is_safety_check) {}
+ FeedbackSource const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
- IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
- IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
V8_WARN_UNUSED_RESULT;
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-
class SelectParameters final {
public:
explicit SelectParameters(MachineRepresentation representation,
@@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone,
- IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
+ const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- const Operator* DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
@@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const wasm::FunctionSig* signature);
#endif // V8_ENABLE_WEBASSEMBLY
- const Operator* MarkAsSafetyCheck(const Operator* op,
- IsSafetyCheck safety_check);
-
const Operator* DelayedStringConstant(const StringConstantBase* str);
private:
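With IsSafetyCheck removed, the cached branch operators above are keyed on BranchHint alone, so CACHED_BRANCH_LIST shrinks from nine entries to three. A reduced standalone sketch of the cached-operator lookup; plain structs stand in for V8's Operator1, which is an assumption of this sketch.

// sketch_branch_cache.cc -- standalone illustration, not V8 code.
#include <iostream>

enum class BranchHint { kNone, kTrue, kFalse };

struct BranchOperator {
  BranchHint hint;
};

// One statically allocated operator per hint, as in the CACHED_BRANCH list.
struct OperatorCache {
  BranchOperator kBranchNone{BranchHint::kNone};
  BranchOperator kBranchTrue{BranchHint::kTrue};
  BranchOperator kBranchFalse{BranchHint::kFalse};
};

const BranchOperator* Branch(const OperatorCache& cache, BranchHint hint) {
  switch (hint) {
    case BranchHint::kNone:  return &cache.kBranchNone;
    case BranchHint::kTrue:  return &cache.kBranchTrue;
    case BranchHint::kFalse: return &cache.kBranchFalse;
  }
  return nullptr;
}

int main() {
  OperatorCache cache;
  std::cout << (Branch(cache, BranchHint::kTrue) == &cache.kBranchTrue) << "\n";  // 1
}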
diff --git a/chromium/v8/src/compiler/compilation-dependencies.cc b/chromium/v8/src/compiler/compilation-dependencies.cc
index dc2db327530..27720c80edf 100644
--- a/chromium/v8/src/compiler/compilation-dependencies.cc
+++ b/chromium/v8/src/compiler/compilation-dependencies.cc
@@ -5,7 +5,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/base/optional.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -19,18 +18,84 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define DEPENDENCY_LIST(V) \
+ V(ConsistentJSFunctionView) \
+ V(ConstantInDictionaryPrototypeChain) \
+ V(ElementsKind) \
+ V(FieldConstness) \
+ V(FieldRepresentation) \
+ V(FieldType) \
+ V(GlobalProperty) \
+ V(InitialMap) \
+ V(InitialMapInstanceSizePrediction) \
+ V(OwnConstantDataProperty) \
+ V(OwnConstantDictionaryProperty) \
+ V(OwnConstantElement) \
+ V(PretenureMode) \
+ V(Protector) \
+ V(PrototypeProperty) \
+ V(StableMap) \
+ V(Transition)
+
CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {
broker->set_dependencies(this);
}
+namespace {
+
+enum CompilationDependencyKind {
+#define V(Name) k##Name,
+ DEPENDENCY_LIST(V)
+#undef V
+};
+
+#define V(Name) class Name##Dependency;
+DEPENDENCY_LIST(V)
+#undef V
+
+const char* CompilationDependencyKindToString(CompilationDependencyKind kind) {
+#define V(Name) #Name "Dependency",
+ static const char* const names[] = {DEPENDENCY_LIST(V)};
+#undef V
+ return names[kind];
+}
+
+} // namespace
+
+class CompilationDependency : public ZoneObject {
+ public:
+ explicit CompilationDependency(CompilationDependencyKind kind) : kind(kind) {}
+
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(Handle<Code> code) const = 0;
+
+#ifdef DEBUG
+#define V(Name) \
+ bool Is##Name() const { return kind == k##Name; } \
+ V8_ALLOW_UNUSED const Name##Dependency* As##Name() const;
+ DEPENDENCY_LIST(V)
+#undef V
+#endif
+
+ const char* ToString() const {
+ return CompilationDependencyKindToString(kind);
+ }
+
+ const CompilationDependencyKind kind;
+};
+
+namespace {
+
class InitialMapDependency final : public CompilationDependency {
public:
InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
const MapRef& initial_map)
- : function_(function), initial_map_(initial_map) {
- }
+ : CompilationDependency(kInitialMap),
+ function_(function),
+ initial_map_(initial_map) {}
bool IsValid() const override {
Handle<JSFunction> function = function_.object();
@@ -55,7 +120,9 @@ class PrototypePropertyDependency final : public CompilationDependency {
PrototypePropertyDependency(JSHeapBroker* broker,
const JSFunctionRef& function,
const ObjectRef& prototype)
- : function_(function), prototype_(prototype) {
+ : CompilationDependency(kPrototypeProperty),
+ function_(function),
+ prototype_(prototype) {
DCHECK(function_.has_instance_prototype(broker->dependencies()));
DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
DCHECK(function_.instance_prototype(broker->dependencies())
@@ -92,7 +159,8 @@ class PrototypePropertyDependency final : public CompilationDependency {
class StableMapDependency final : public CompilationDependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {}
+ explicit StableMapDependency(const MapRef& map)
+ : CompilationDependency(kStableMap), map_(map) {}
bool IsValid() const override {
// TODO(v8:11670): Consider turn this back into a CHECK inside the
@@ -117,7 +185,8 @@ class ConstantInDictionaryPrototypeChainDependency final
explicit ConstantInDictionaryPrototypeChainDependency(
const MapRef receiver_map, const NameRef property_name,
const ObjectRef constant, PropertyKind kind)
- : receiver_map_(receiver_map),
+ : CompilationDependency(kConstantInDictionaryPrototypeChain),
+ receiver_map_(receiver_map),
property_name_{property_name},
constant_{constant},
kind_{kind} {
@@ -240,7 +309,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
const MapRef& map,
Representation representation,
FieldIndex index, const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDataProperty),
+ broker_(broker),
holder_(holder),
map_(map),
representation_(representation),
@@ -294,7 +364,8 @@ class OwnConstantDictionaryPropertyDependency final
const JSObjectRef& holder,
InternalIndex index,
const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDictionaryProperty),
+ broker_(broker),
holder_(holder),
map_(holder.map()),
index_(index),
@@ -345,7 +416,7 @@ class OwnConstantDictionaryPropertyDependency final
class ConsistentJSFunctionViewDependency final : public CompilationDependency {
public:
explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
- : function_(function) {}
+ : CompilationDependency(kConsistentJSFunctionView), function_(function) {}
bool IsValid() const override {
return function_.IsConsistentWithHeapState();
@@ -353,17 +424,14 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
void Install(Handle<Code> code) const override {}
-#ifdef DEBUG
- bool IsConsistentJSFunctionViewDependency() const override { return true; }
-#endif
-
private:
const JSFunctionRef function_;
};
class TransitionDependency final : public CompilationDependency {
public:
- explicit TransitionDependency(const MapRef& map) : map_(map) {
+ explicit TransitionDependency(const MapRef& map)
+ : CompilationDependency(kTransition), map_(map) {
DCHECK(map_.CanBeDeprecated());
}
@@ -383,7 +451,9 @@ class PretenureModeDependency final : public CompilationDependency {
public:
PretenureModeDependency(const AllocationSiteRef& site,
AllocationType allocation)
- : site_(site), allocation_(allocation) {}
+ : CompilationDependency(kPretenureMode),
+ site_(site),
+ allocation_(allocation) {}
bool IsValid() const override {
return allocation_ == site_.object()->GetAllocationType();
@@ -396,10 +466,6 @@ class PretenureModeDependency final : public CompilationDependency {
DependentCode::kAllocationSiteTenuringChangedGroup);
}
-#ifdef DEBUG
- bool IsPretenureModeDependency() const override { return true; }
-#endif
-
private:
AllocationSiteRef site_;
AllocationType allocation_;
@@ -409,7 +475,10 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor,
Representation representation)
- : map_(map), descriptor_(descriptor), representation_(representation) {}
+ : CompilationDependency(kFieldRepresentation),
+ map_(map),
+ descriptor_(descriptor),
+ representation_(representation) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -433,12 +502,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
DependentCode::kFieldRepresentationGroup);
}
-#ifdef DEBUG
- bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const override {
+ bool DependsOn(const Handle<Map>& receiver_map) const {
return map_.object().equals(receiver_map);
}
-#endif
private:
MapRef map_;
@@ -450,7 +516,10 @@ class FieldTypeDependency final : public CompilationDependency {
public:
FieldTypeDependency(const MapRef& map, InternalIndex descriptor,
const ObjectRef& type)
- : map_(map), descriptor_(descriptor), type_(type) {}
+ : CompilationDependency(kFieldType),
+ map_(map),
+ descriptor_(descriptor),
+ type_(type) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -481,7 +550,9 @@ class FieldTypeDependency final : public CompilationDependency {
class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& map, InternalIndex descriptor)
- : map_(map), descriptor_(descriptor) {}
+ : CompilationDependency(kFieldConstness),
+ map_(map),
+ descriptor_(descriptor) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -515,7 +586,10 @@ class GlobalPropertyDependency final : public CompilationDependency {
public:
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
- : cell_(cell), type_(type), read_only_(read_only) {
+ : CompilationDependency(kGlobalProperty),
+ cell_(cell),
+ type_(type),
+ read_only_(read_only) {
DCHECK_EQ(type_, cell_.property_details().cell_type());
DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
@@ -545,7 +619,8 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
- explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {}
+ explicit ProtectorDependency(const PropertyCellRef& cell)
+ : CompilationDependency(kProtector), cell_(cell) {}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
@@ -565,7 +640,7 @@ class ProtectorDependency final : public CompilationDependency {
class ElementsKindDependency final : public CompilationDependency {
public:
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
- : site_(site), kind_(kind) {
+ : CompilationDependency(kElementsKind), site_(site), kind_(kind) {
DCHECK(AllocationSite::ShouldTrack(kind_));
}
@@ -596,7 +671,10 @@ class OwnConstantElementDependency final : public CompilationDependency {
public:
OwnConstantElementDependency(const JSObjectRef& holder, uint32_t index,
const ObjectRef& element)
- : holder_(holder), index_(index), element_(element) {}
+ : CompilationDependency(kOwnConstantElement),
+ holder_(holder),
+ index_(index),
+ element_(element) {}
bool IsValid() const override {
DisallowGarbageCollection no_gc;
@@ -624,7 +702,9 @@ class InitialMapInstanceSizePredictionDependency final
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
- : function_(function), instance_size_(instance_size) {}
+ : CompilationDependency(kInitialMapInstanceSizePrediction),
+ function_(function),
+ instance_size_(instance_size) {}
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
@@ -651,6 +731,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
+} // namespace
+
void CompilationDependencies::RecordDependency(
CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
@@ -795,9 +877,19 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
broker_, holder, index, value));
}
+V8_INLINE void TraceInvalidCompilationDependency(
+ const CompilationDependency* d) {
+ DCHECK(FLAG_trace_compilation_dependencies);
+ DCHECK(!d->IsValid());
+ PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
+}
+
bool CompilationDependencies::Commit(Handle<Code> code) {
for (auto dep : dependencies_) {
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -812,6 +904,9 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// can call EnsureHasInitialMap, which can invalidate a StableMapDependency
// on the prototype object's map.
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -838,8 +933,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
#ifdef DEBUG
for (auto dep : dependencies_) {
CHECK_IMPLIES(!dep->IsValid(),
- dep->IsPretenureModeDependency() ||
- dep->IsConsistentJSFunctionViewDependency());
+ dep->IsPretenureMode() || dep->IsConsistentJSFunctionView());
}
#endif
@@ -848,6 +942,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
namespace {
+
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
@@ -862,8 +957,19 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
if (last_prototype.has_value() && proto.equals(*last_prototype)) break;
}
}
+
} // namespace
+#ifdef DEBUG
+#define V(Name) \
+ const Name##Dependency* CompilationDependency::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return static_cast<const Name##Dependency*>(this); \
+ }
+DEPENDENCY_LIST(V)
+#undef V
+#endif // DEBUG
+
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
@@ -944,6 +1050,17 @@ CompilationDependencies::FieldTypeDependencyOffTheRecord(
return zone_->New<FieldTypeDependency>(map, descriptor, type);
}
+#ifdef DEBUG
+// static
+bool CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map) {
+ return dep->IsFieldRepresentation() &&
+ dep->AsFieldRepresentation()->DependsOn(receiver_map);
+}
+#endif // DEBUG
+
+#undef DEPENDENCY_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
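The hunks above fold compilation-dependency.h into this file (see the deletion further below) and replace the per-class virtual Is...Dependency predicates with a kind tag generated from DEPENDENCY_LIST. A standalone sketch of that X-macro pattern with the list cut down to two entries: one list expands into the enum, the name table, and the Is helpers.

// sketch_dependency_list.cc -- standalone illustration, not V8 code.
#include <cassert>
#include <iostream>

#define DEPENDENCY_LIST(V) \
  V(StableMap)             \
  V(Transition)

enum DependencyKind {
#define V(Name) k##Name,
  DEPENDENCY_LIST(V)
#undef V
};

const char* KindToString(DependencyKind kind) {
#define V(Name) #Name "Dependency",
  static const char* const names[] = {DEPENDENCY_LIST(V)};
#undef V
  return names[kind];
}

class Dependency {
 public:
  explicit Dependency(DependencyKind kind) : kind_(kind) {}
  // Generate bool IsStableMap() / IsTransition() from the same list.
#define V(Name) \
  bool Is##Name() const { return kind_ == k##Name; }
  DEPENDENCY_LIST(V)
#undef V
  const char* ToString() const { return KindToString(kind_); }

 private:
  DependencyKind kind_;
};

int main() {
  Dependency d(kStableMap);
  assert(d.IsStableMap() && !d.IsTransition());
  std::cout << d.ToString() << "\n";  // StableMapDependency
}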
diff --git a/chromium/v8/src/compiler/compilation-dependencies.h b/chromium/v8/src/compiler/compilation-dependencies.h
index be507c6843d..f4b49878c86 100644
--- a/chromium/v8/src/compiler/compilation-dependencies.h
+++ b/chromium/v8/src/compiler/compilation-dependencies.h
@@ -154,6 +154,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
const MapRef& map, InternalIndex descriptor,
const ObjectRef& /* Contains a FieldType underneath. */ type) const;
+#ifdef DEBUG
+ static bool IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map);
+#endif // DEBUG
+
private:
Zone* const zone_;
JSHeapBroker* const broker_;
diff --git a/chromium/v8/src/compiler/compilation-dependency.h b/chromium/v8/src/compiler/compilation-dependency.h
deleted file mode 100644
index 852c7b76406..00000000000
--- a/chromium/v8/src/compiler/compilation-dependency.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
-#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
-
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class MaybeObjectHandle;
-
-namespace compiler {
-
-class CompilationDependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(Handle<Code> code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
- virtual bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const {
- return false;
- }
- virtual bool IsConsistentJSFunctionViewDependency() const { return false; }
-#endif
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
diff --git a/chromium/v8/src/compiler/csa-load-elimination.cc b/chromium/v8/src/compiler/csa-load-elimination.cc
index b5df8b542b7..ece79a71560 100644
--- a/chromium/v8/src/compiler/csa-load-elimination.cc
+++ b/chromium/v8/src/compiler/csa-load-elimination.cc
@@ -46,7 +46,7 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
case IrOpcode::kStoreToObject:
return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kDebugBreak:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
// Avoid changing optimizations in the presence of debug instructions.
return PropagateInputState(node);
case IrOpcode::kCall:
diff --git a/chromium/v8/src/compiler/decompression-optimizer.cc b/chromium/v8/src/compiler/decompression-optimizer.cc
index 79e77fcee65..c0068489f75 100644
--- a/chromium/v8/src/compiler/decompression-optimizer.cc
+++ b/chromium/v8/src/compiler/decompression-optimizer.cc
@@ -15,8 +15,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
- return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
- opcode == IrOpcode::kProtectedLoad ||
+ return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->LoadImmutable(compressed_load_rep));
break;
- case IrOpcode::kPoisonedLoad:
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(compressed_load_rep));
- break;
case IrOpcode::kProtectedLoad:
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.cc b/chromium/v8/src/compiler/effect-control-linearizer.cc
index d7a0ca62dd2..9d000724b5a 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.cc
+++ b/chromium/v8/src/compiler/effect-control-linearizer.cc
@@ -36,7 +36,6 @@ namespace internal {
namespace compiler {
enum class MaintainSchedule { kMaintain, kDiscard };
-enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
class EffectControlLinearizer {
public:
@@ -44,13 +43,11 @@ class EffectControlLinearizer {
JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
MaintainSchedule maintain_schedule,
JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- mask_array_index_(mask_array_index),
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -80,7 +77,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
@@ -338,7 +334,6 @@ class EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
- MaskArrayIndexEnable mask_array_index_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
@@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
- case IrOpcode::kPoisonIndex:
- result = LowerPoisonIndex(node);
- break;
case IrOpcode::kCheckClosure:
result = LowerCheckClosure(node, frame_state);
break;
@@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
- Node* index = node->InputAt(0);
- if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
- index = __ Word32PoisonOnSpeculation(index);
- }
- return index;
-}
-
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* frame_state) {
Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
@@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Word32And(bitfield3,
__ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
@@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
- check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ check, frame_state);
}
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
@@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* check = __ Uint32LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* check = __ Uint64LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -3696,9 +3679,14 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
- return ChangeIntPtrToSmi(
+ Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
+ return arguments_length;
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3711,6 +3699,10 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -4263,12 +4255,10 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
#endif // V8_INTL_SUPPORT
@@ -5175,6 +5165,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
Node* value_is_smi = ObjectIsSmi(node);
__ GotoIf(value_is_smi, if_error);
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kIsSequence: {
CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);
@@ -5195,8 +5187,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
kNoWriteBarrier),
stack_slot, 0, node);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5209,8 +5201,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
fast_api_call::GetTypedArrayElementsKind(
overloads_resolution_result.element_type),
&next);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5397,6 +5389,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node** const inputs = graph()->zone()->NewArray<Node*>(
kFastTargetAddressInputCount + c_arg_count + n.FastCallExtraInputCount());
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
// The inputs to {Call} node for the fast call look like:
// [fast callee, receiver, ... C arguments, [optional Options], effect,
// control].
@@ -5408,7 +5402,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
// with a Phi node created by AdaptOverloadedFastCallArgument.
inputs[kFastTargetAddressInputIndex] =
(c_functions.size() == 1) ? __ ExternalConstant(ExternalReference::Create(
- c_functions[0].address))
+ c_functions[0].address, ref_type))
: nullptr;
for (int i = 0; i < c_arg_count; ++i) {
@@ -5776,8 +5770,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
- return __ LoadElement(AccessBuilder::ForTypedArrayElement(
- array_type, true, LoadSensitivity::kCritical),
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
data_ptr, index);
}
@@ -6796,26 +6789,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
-namespace {
-
-MaskArrayIndexEnable MaskArrayForPoisonLevel(
- PoisoningMitigationLevel poison_level) {
- return (poison_level != PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
-}
-
-} // namespace
-
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
@@ -6824,16 +6804,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
schedule);
EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kMaintain, broker);
- MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
- poison_level);
+ MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
SelectLowering select_lowering(&graph_assembler, js_graph->graph());
graph_assembler.AddInlineReducer(&memory_lowering);
graph_assembler.AddInlineReducer(&select_lowering);
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.h b/chromium/v8/src/compiler/effect-control-linearizer.h
index fca4899263c..97467391e2a 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.h
+++ b/chromium/v8/src/compiler/effect-control-linearizer.h
@@ -26,7 +26,7 @@ class JSHeapBroker;
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
// Performs effect control linearization lowering in addition to machine
// lowering, producing a scheduled graph that is ready for instruction
@@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
V8_EXPORT_PRIVATE void LowerToMachineSchedule(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/chromium/v8/src/compiler/escape-analysis.cc b/chromium/v8/src/compiler/escape-analysis.cc
index 7ff6ab684fc..bf693c71dc6 100644
--- a/chromium/v8/src/compiler/escape-analysis.cc
+++ b/chromium/v8/src/compiler/escape-analysis.cc
@@ -510,12 +510,15 @@ int OffsetOfFieldAccess(const Operator* op) {
return access.offset;
}
-int OffsetOfElementAt(ElementAccess const& access, int index) {
+Maybe<int> OffsetOfElementAt(ElementAccess const& access, int index) {
+ MachineRepresentation representation = access.machine_type.representation();
+ // Double elements accesses are not yet supported. See chromium:1237821.
+ if (representation == MachineRepresentation::kFloat64) return Nothing<int>();
+
DCHECK_GE(index, 0);
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kTaggedSizeLog2);
- return access.header_size +
- (index << ElementSizeLog2Of(access.machine_type.representation()));
+ DCHECK_GE(ElementSizeLog2Of(representation), kTaggedSizeLog2);
+ return Just(access.header_size +
+ (index << ElementSizeLog2Of(representation)));
}
Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
@@ -527,7 +530,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
double min = index_type.Min();
int index = static_cast<int>(min);
if (index < 0 || index != min || index != max) return Nothing<int>();
- return Just(OffsetOfElementAt(ElementAccessOf(op), index));
+ return OffsetOfElementAt(ElementAccessOf(op), index);
}
Node* LowerCompareMapsWithoutLoad(Node* checked_map,
diff --git a/chromium/v8/src/compiler/escape-analysis.h b/chromium/v8/src/compiler/escape-analysis.h
index 907c7cc0878..d3f9768fe71 100644
--- a/chromium/v8/src/compiler/escape-analysis.h
+++ b/chromium/v8/src/compiler/escape-analysis.h
@@ -139,6 +139,11 @@ class VirtualObject : public Dependable {
}
return Just(fields_.at(offset / kTaggedSize));
}
+ Maybe<Variable> FieldAt(Maybe<int> maybe_offset) const {
+ int offset;
+ if (!maybe_offset.To(&offset)) return Nothing<Variable>();
+ return FieldAt(offset);
+ }
Id id() const { return id_; }
int size() const { return static_cast<int>(kTaggedSize * fields_.size()); }
// Escaped might mean that the object escaped to untracked memory or that it
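
The escape-analysis hunks above change OffsetOfElementAt to return Maybe<int> (giving up on double elements, see chromium:1237821) and add a VirtualObject::FieldAt overload that simply forwards an absent offset. A minimal standalone analogue of that shape, using std::optional in place of V8's Maybe<T>, with invented constants and field values and an assumed tagged size of 8 bytes:

#include <iostream>
#include <optional>
#include <string>

constexpr int kTaggedSize = 8;                // assumed slot size for the sketch
constexpr int kHeaderSize = 2 * kTaggedSize;  // invented header size

// Analogue of the new OffsetOfElementAt: bail out for the unsupported case.
std::optional<int> OffsetOfElementAt(bool is_double_element, int index) {
  if (is_double_element) return std::nullopt;
  return kHeaderSize + index * kTaggedSize;
}

// Analogue of the existing FieldAt(int offset).
std::optional<std::string> FieldAt(int offset) {
  return "field@" + std::to_string(offset);
}

// New forwarding overload: an absent offset simply yields an absent field.
std::optional<std::string> FieldAt(std::optional<int> maybe_offset) {
  if (!maybe_offset.has_value()) return std::nullopt;
  return FieldAt(*maybe_offset);
}

int main() {
  std::cout << FieldAt(OffsetOfElementAt(false, 3)).value() << "\n";     // field@40
  std::cout << FieldAt(OffsetOfElementAt(true, 3)).has_value() << "\n";  // 0
}
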
diff --git a/chromium/v8/src/compiler/frame-states.cc b/chromium/v8/src/compiler/frame-states.cc
index bbc2049ae5d..c5199f1e647 100644
--- a/chromium/v8/src/compiler/frame-states.cc
+++ b/chromium/v8/src/compiler/frame-states.cc
@@ -214,8 +214,11 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
ContinuationFrameStateMode mode) {
// Depending on {mode}, final parameters are added by the deoptimizer
// and aren't explicitly passed in the frame state.
- DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver
- stack_parameter_count + DeoptimizerParameterCountFor(mode));
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(name) +
+ (kJSArgcIncludesReceiver ? 0
+ : 1), // Add receiver if it is not included.
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
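
The frame-states.cc hunk above rewrites the DCHECK so that the expected stack parameter count only adds a receiver slot when kJSArgcIncludesReceiver is false. A tiny standalone sketch of that arithmetic, with hypothetical names (ExpectedStackParameterCount, builtin_stack_params) that are not part of V8:

#include <cassert>

// Only add a slot for the receiver when the argument-count convention does
// not already include it.
int ExpectedStackParameterCount(int builtin_stack_params,
                                bool argc_includes_receiver) {
  return builtin_stack_params + (argc_includes_receiver ? 0 : 1);
}

int main() {
  assert(ExpectedStackParameterCount(2, /*argc_includes_receiver=*/false) == 3);
  assert(ExpectedStackParameterCount(2, /*argc_includes_receiver=*/true) == 2);
  return 0;
}
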
diff --git a/chromium/v8/src/compiler/globals.h b/chromium/v8/src/compiler/globals.h
index 392cb239173..23f834cd6c5 100644
--- a/chromium/v8/src/compiler/globals.h
+++ b/chromium/v8/src/compiler/globals.h
@@ -92,7 +92,8 @@ const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
// to add support for IA32, because it has a totally different approach
// (using FP stack). As support is added to more platforms, please make sure
// to list them here in order to enable tests of this functionality.
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64) || \
+ (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
diff --git a/chromium/v8/src/compiler/graph-assembler.cc b/chromium/v8/src/compiler/graph-assembler.cc
index 26ae88362d8..6bfd6f8c223 100644
--- a/chromium/v8/src/compiler/graph-assembler.cc
+++ b/chromium/v8/src/compiler/graph-assembler.cc
@@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
effect(), control()));
}
-Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
- return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
- effect(), control()));
-}
-
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason,
- feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeIf(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
+ Node* condition, Node* frame_state) {
return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
- frame_state, is_safety_check);
+ frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
@@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint,
- IsSafetyCheck::kCriticalSafetyCheck);
+ BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,
diff --git a/chromium/v8/src/compiler/graph-assembler.h b/chromium/v8/src/compiler/graph-assembler.h
index 5efe6dd9c3c..c9ddd63e719 100644
--- a/chromium/v8/src/compiler/graph-assembler.h
+++ b/chromium/v8/src/compiler/graph-assembler.h
@@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* Word32PoisonOnSpeculation(Node* value);
-
- Node* DeoptimizeIf(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
@@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check, Vars...);
+ BranchHint hint, Vars...);
void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
Node* if_false_control,
BasicBlock* if_true_block,
@@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
@@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars... vars) {
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check,
- Vars... vars) {
+ BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
- Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
- condition, control());
+ Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
Node* if_true_control = control_ =
graph()->NewNode(common()->IfTrue(), branch);
diff --git a/chromium/v8/src/compiler/heap-refs.cc b/chromium/v8/src/compiler/heap-refs.cc
index 1688a14a048..19c7bd1ef6e 100644
--- a/chromium/v8/src/compiler/heap-refs.cc
+++ b/chromium/v8/src/compiler/heap-refs.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/platform.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/protectors-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -41,7 +40,7 @@ namespace compiler {
//
// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject
// and the data is an instance of the corresponding (most-specific) subclass,
-// e.g. JSFunctionData, which provides serialized information about the
+// e.g. JSFunctionData, which provides serialized information about the
// object. Allows serialization from the background thread.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
@@ -257,13 +256,9 @@ bool PropertyCellData::Cache(JSHeapBroker* broker) {
}
}
- if (property_details.cell_type() == PropertyCellType::kConstant) {
- Handle<Object> value_again =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (*value != *value_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
+ if (property_details.cell_type() == PropertyCellType::kInTransition) {
+ DCHECK(!broker->IsMainThread());
+ return false;
}
ObjectData* value_data = broker->TryGetOrCreateData(value);
@@ -317,17 +312,6 @@ class JSObjectData : public JSReceiverData {
return object_create_map_;
}
- ObjectData* GetOwnConstantElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnFastDataProperty(
- JSHeapBroker* broker, Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy);
-
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
@@ -349,21 +333,6 @@ class JSObjectData : public JSReceiverData {
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object as non-writable and
- // non-configurable, or (2) are known not to (possibly they don't exist at
- // all). In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
- // Properties that either:
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- // For simplicity, this may in theory overlap with inobject_fields_.
- // For fast mode objects, the keys of the map are the property_index() values
- // of the respective property FieldIndex'es. For slow mode objects, the keys
- // are the dictionary indices.
- ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
@@ -390,18 +359,6 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
namespace {
-base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
- Handle<Object> receiver,
- uint32_t index,
- bool constant_only) {
- LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return MakeRef(broker, it.GetDataValue());
- }
- return base::nullopt;
-}
-
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
JSHeapBroker* broker, JSObjectRef holder, Representation representation,
FieldIndex field_index) {
@@ -496,70 +453,6 @@ base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
} // namespace
-ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_constant_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_constant_elements_.push_back({index, result});
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(field_index.property_index());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about fast property with index "
- << field_index.property_index() << " on "
- << this);
- return nullptr;
- }
-
- // This call will always succeed on the main thread.
- CHECK(broker->IsMainThread());
- JSObjectRef object_ref = MakeRef(broker, Handle<JSObject>::cast(object()));
- ObjectRef property = GetOwnFastDataPropertyFromHeap(
- broker, object_ref, representation, field_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(field_index.property_index(), result));
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(dict_index.as_int());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about dictionary property with index "
- << dict_index.as_int() << " on " << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnDictionaryPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), dict_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(dict_index.as_int(), result));
- return result;
-}
-
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -625,28 +518,6 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
-
- ObjectData* bound_target_function() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_target_function_;
- }
- ObjectData* bound_this() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_this_;
- }
- ObjectData* bound_arguments() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_arguments_;
- }
-
- private:
- bool serialized_ = false;
-
- ObjectData* bound_target_function_ = nullptr;
- ObjectData* bound_this_ = nullptr;
- ObjectData* bound_arguments_ = nullptr;
};
class JSFunctionData : public JSObjectData {
@@ -659,10 +530,6 @@ class JSFunctionData : public JSObjectData {
bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
- bool has_feedback_vector() const {
- DCHECK(serialized_);
- return has_feedback_vector_;
- }
bool has_initial_map() const {
DCHECK(serialized_);
return has_initial_map_;
@@ -680,10 +547,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return context_;
}
- ObjectData* native_context() const {
- DCHECK(serialized_);
- return native_context_;
- }
MapData* initial_map() const {
DCHECK(serialized_);
return initial_map_;
@@ -700,10 +563,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return feedback_cell_;
}
- ObjectData* feedback_vector() const {
- DCHECK(serialized_);
- return feedback_vector_;
- }
int initial_map_instance_size_with_min_slack() const {
DCHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -740,19 +599,16 @@ class JSFunctionData : public JSObjectData {
using UsedFields = base::Flags<UsedField>;
UsedFields used_fields_;
- bool has_feedback_vector_ = false;
ObjectData* prototype_or_initial_map_ = nullptr;
bool has_initial_map_ = false;
bool has_instance_prototype_ = false;
bool PrototypeRequiresRuntimeLookup_ = false;
ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr; // Derives from context_.
MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_.
ObjectData* instance_prototype_ =
nullptr; // Derives from prototype_or_initial_map_.
ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell.
ObjectData* feedback_cell_ = nullptr;
int initial_map_instance_size_with_min_slack_; // Derives from
// prototype_or_initial_map_.
@@ -809,10 +665,6 @@ class MapData : public HeapObjectData {
return is_abandoned_prototype_map_;
}
- // Extra information.
- void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* FindRootMap() const;
-
void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
@@ -840,8 +692,7 @@ class MapData : public HeapObjectData {
bool has_extra_serialized_data() const {
return serialized_constructor_ || serialized_backpointer_ ||
- serialized_prototype_ || serialized_root_map_ ||
- serialized_for_element_store_;
+ serialized_prototype_ || serialized_for_element_store_;
}
private:
@@ -881,9 +732,6 @@ class MapData : public HeapObjectData {
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
- bool serialized_root_map_ = false;
- ObjectData* root_map_ = nullptr;
-
bool serialized_for_element_store_ = false;
};
@@ -938,16 +786,13 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
// guaranteed to see an initialized JSFunction object, and after
// initialization fields remain in a valid state.
- Context context = function->context(kRelaxedLoad);
- context_ = broker->GetOrCreateData(context, kAssumeMemoryFence);
- CHECK(context_->IsContext());
+ ContextRef context =
+ MakeRefAssumeMemoryFence(broker, function->context(kRelaxedLoad));
+ context_ = context.data();
- native_context_ = broker->GetOrCreateData(context.map().native_context(),
- kAssumeMemoryFence);
- CHECK(native_context_->IsNativeContext());
-
- SharedFunctionInfo shared = function->shared(kRelaxedLoad);
- shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence);
+ SharedFunctionInfoRef shared =
+ MakeRefAssumeMemoryFence(broker, function->shared(kRelaxedLoad));
+ shared_ = shared.data();
if (function->has_prototype_slot()) {
prototype_or_initial_map_ = broker->GetOrCreateData(
@@ -981,9 +826,10 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
if (has_initial_map_) {
has_instance_prototype_ = true;
- instance_prototype_ = broker->GetOrCreateData(
- Handle<Map>::cast(initial_map_->object())->prototype(),
- kAssumeMemoryFence);
+ instance_prototype_ =
+ MakeRefAssumeMemoryFence(
+ broker, Handle<Map>::cast(initial_map_->object())->prototype())
+ .data();
} else if (prototype_or_initial_map_->IsHeapObject() &&
!Handle<HeapObject>::cast(prototype_or_initial_map_->object())
->IsTheHole()) {
@@ -994,15 +840,9 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup();
- FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad);
- feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence);
-
- ObjectData* maybe_feedback_vector = broker->GetOrCreateData(
- feedback_cell.value(kAcquireLoad), kAssumeMemoryFence);
- if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) {
- has_feedback_vector_ = true;
- feedback_vector_ = maybe_feedback_vector;
- }
+ FeedbackCellRef feedback_cell = MakeRefAssumeMemoryFence(
+ broker, function->raw_feedback_cell(kAcquireLoad));
+ feedback_cell_ = feedback_cell.data();
#ifdef DEBUG
serialized_ = true;
@@ -1016,7 +856,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
Handle<JSFunction> f = Handle<JSFunction>::cast(object());
CHECK_EQ(*context_->object(), f->context());
- CHECK_EQ(*native_context_->object(), f->native_context());
CHECK_EQ(*shared_->object(), f->shared());
if (f->has_prototype_slot()) {
@@ -1080,22 +919,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
return false;
}
- if (has_used_field(kHasFeedbackVector) &&
- has_feedback_vector_ != f->has_feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector");
- return false;
- }
-
- if (has_feedback_vector_) {
- if (has_used_field(kFeedbackVector) &&
- *feedback_vector_->object() != f->feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector");
- return false;
- }
- } else {
- DCHECK_NULL(feedback_vector_);
- }
-
return true;
}
@@ -1269,61 +1092,16 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- DCHECK(!broker->is_concurrent_inlining());
-
- if (serialized_) return true;
- if (broker->StackHasOverflowed()) return false;
-
- TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
-
- // We don't immediately set {serialized_} in order to correctly handle the
- // case where a recursive call to this method reaches the stack limit.
-
- DCHECK_NULL(bound_target_function_);
- bound_target_function_ =
- broker->GetOrCreateData(function->bound_target_function());
- bool serialized_nested = true;
- if (!bound_target_function_->should_access_heap()) {
- if (bound_target_function_->IsJSBoundFunction()) {
- serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag);
- }
- }
- if (!serialized_nested) {
- // We couldn't serialize all nested bound functions due to stack
- // overflow. Give up.
- DCHECK(!serialized_);
- bound_target_function_ = nullptr; // Reset to sync with serialized_.
- return false;
- }
-
- serialized_ = true;
-
- DCHECK_NULL(bound_arguments_);
- bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
-
- DCHECK_NULL(bound_this_);
- bound_this_ = broker->GetOrCreateData(function->bound_this());
-
- return true;
-}
-
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind),
- inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()),
- own_properties_(broker->zone()) {}
+ inobject_fields_(broker->zone()) {}
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- own_elements_(broker->zone()) {}
+ : JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
@@ -1331,19 +1109,9 @@ class JSArrayData : public JSObjectData {
return length_;
}
- ObjectData* GetOwnElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
private:
bool serialized_ = false;
ObjectData* length_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
void JSArrayData::Serialize(JSHeapBroker* broker,
@@ -1358,52 +1126,11 @@ void JSArrayData::Serialize(JSHeapBroker* broker,
length_ = broker->GetOrCreateData(jsarray->length());
}
-ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, false);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_elements_.push_back({index, result});
- return result;
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- properties_(broker->zone()) {
- if (!broker->is_concurrent_inlining()) {
- is_detached_ = object->IsDetached();
- }
- }
-
- bool IsDetached() const {
- return is_detached_;
- }
-
- ObjectData* GetPropertyCell(
- JSHeapBroker* broker, ObjectData* name,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- // Only valid if not concurrent inlining.
- bool is_detached_ = false;
-
- // Properties that either
- // (1) are known to exist as property cells on the global object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
+ : JSObjectData(broker, storage, object, kind) {}
};
class JSGlobalProxyData : public JSObjectData {
@@ -1413,46 +1140,6 @@ class JSGlobalProxyData : public JSObjectData {
: JSObjectData(broker, storage, object, kind) {}
};
-namespace {
-
-base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
- Handle<Name> name) {
- base::Optional<PropertyCell> maybe_cell =
- ConcurrentLookupIterator::TryGetPropertyCell(
- broker->isolate(), broker->local_isolate_or_isolate(),
- broker->target_native_context().global_object().object(), name);
- if (!maybe_cell.has_value()) return {};
- return TryMakeRef(broker, *maybe_cell);
-}
-
-} // namespace
-
-ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
- ObjectData* name,
- SerializationPolicy policy) {
- CHECK_NOT_NULL(name);
- for (auto const& p : properties_) {
- if (p.first == name) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about global property " << name);
- return nullptr;
- }
-
- ObjectData* result = nullptr;
- base::Optional<PropertyCellRef> cell =
- GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
- if (cell.has_value()) {
- result = cell->data();
- if (!result->should_access_heap()) {
- result->AsPropertyCell()->Cache(broker);
- }
- }
- properties_.push_back({name, result});
- return result;
-}
-
#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
@@ -1540,19 +1227,6 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker,
return true;
}
-void MapData::SerializeRootMap(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_root_map_) return;
- serialized_root_map_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeRootMap");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(root_map_);
- root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
-}
-
-ObjectData* MapData::FindRootMap() const { return root_map_; }
-
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag tag,
int max_depth) {
@@ -1598,7 +1272,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
boilerplate->map().instance_descriptors(isolate), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
@@ -1693,8 +1367,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
- target_native_context().Serialize(NotConcurrentInliningTag{this});
-
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
@@ -1838,6 +1510,19 @@ int ObjectRef::AsSmi() const {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
+bool MapRef::CanInlineElementAccess() const {
+ if (!IsJSObjectMap()) return false;
+ if (is_access_check_needed()) return false;
+ if (has_indexed_interceptor()) return false;
+ ElementsKind kind = elements_kind();
+ if (IsFastElementsKind(kind)) return true;
+ if (IsTypedArrayElementsKind(kind) && kind != BIGUINT64_ELEMENTS &&
+ kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
+ return false;
+}
+
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
@@ -1931,6 +1616,11 @@ void RecordConsistentJSFunctionViewDependencyIfNeeded(
} // namespace
+base::Optional<FeedbackVectorRef> JSFunctionRef::feedback_vector(
+ CompilationDependencies* dependencies) const {
+ return raw_feedback_cell(dependencies).feedback_vector();
+}
+
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const {
if (data_->should_access_heap()) {
@@ -2090,31 +1780,22 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
object()->FindFieldOwner(broker()->isolate(), descriptor_index));
}
-ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetFieldType(descriptor_index);
-}
-
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy) const {
- if (broker()->is_concurrent_inlining()) {
- String maybe_char;
- auto result = ConcurrentLookupIterator::TryGetOwnChar(
- &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
- index);
-
- if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
- << *this << " at index " << index);
- return {};
- }
+ uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ String maybe_char;
+ auto result = ConcurrentLookupIterator::TryGetOwnChar(
+ &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ index);
- DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return TryMakeRef(broker(), maybe_char);
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
+ return {};
}
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return TryMakeRef(broker(), maybe_char);
}
bool StringRef::SupportedStringKind() const {
@@ -2165,8 +1846,6 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
return object()->constant_elements().length();
}
-ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
-
base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
Handle<Object> value;
{
@@ -2234,26 +1913,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
// kBackgroundSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
- return MakeRef(broker(), result::cast(object()->name())); \
- }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR except that we force a direct heap access if
// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
// This is because we identified the method to be safe to use direct heap
// access, but the holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -2298,31 +1968,22 @@ uint64_t HeapNumberRef::value_as_bits() const {
return object()->value_as_bits(kRelaxedLoad);
}
-base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function()
- const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_target_function(),
- kAssumeMemoryFence);
- }
- return TryMakeRef<JSReceiver>(
- broker(), data()->AsJSBoundFunction()->bound_target_function());
+JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
-base::Optional<ObjectRef> JSBoundFunctionRef::bound_this() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_this(), kAssumeMemoryFence);
- }
- return TryMakeRef<Object>(broker(),
- data()->AsJSBoundFunction()->bound_this());
+
+ObjectRef JSBoundFunctionRef::bound_this() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
+
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
- }
- return FixedArrayRef(broker(),
- data()->AsJSBoundFunction()->bound_arguments());
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
// Immutable after initialization.
@@ -2354,8 +2015,6 @@ BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
@@ -2385,33 +2044,16 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
return object()->signature().IsUndefined(broker()->isolate());
}
-bool FunctionTemplateInfoRef::has_call_code() const {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- return !call_code.IsUndefined();
-}
-
HEAP_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map, SerializationPolicy policy) {
+ MapRef receiver_map) {
const HolderLookupResult not_found;
- // There are currently two ways we can see a FunctionTemplateInfo on the
- // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
- // AccessorPair. In both cases, the FTI is fully constructed on the main
- // thread before.
- // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
- // have to use the GC predicate to check whether objects are fully
- // initialized and safe to read.
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !object()->accept_any_receiver())) {
+ if (!receiver_map.IsJSObjectMap() || (receiver_map.is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
return not_found;
}
- if (!receiver_map.IsJSObjectMap()) return not_found;
-
- DCHECK(has_call_code());
-
Handle<FunctionTemplateInfo> expected_receiver_type;
{
DisallowGarbageCollection no_gc;
@@ -2424,17 +2066,11 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
-
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()});
- }
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value()) return not_found;
- if (prototype->IsNull()) return not_found;
-
+ if (!prototype.has_value() || prototype->IsNull()) return not_found;
if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
return not_found;
}
@@ -2457,6 +2093,7 @@ ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+ CHECK(HasBytecodeArray());
BytecodeArray bytecode_array;
if (!broker()->IsMainThread()) {
bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
@@ -2480,12 +2117,9 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
broker()->is_turboprop());
}
-base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- DisallowGarbageCollection no_gc;
+ObjectRef FeedbackCellRef::value() const {
DCHECK(data_->should_access_heap());
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value));
+ return MakeRefAssumeMemoryFence(broker(), object()->value(kAcquireLoad));
}
base::Optional<ObjectRef> MapRef::GetStrongValue(
@@ -2513,75 +2147,59 @@ base::Optional<HeapObjectRef> MapRef::prototype() const {
return HeapObjectRef(broker(), prototype_data);
}
-void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker(), tag);
+MapRef MapRef::FindRootMap() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->FindRootMap(broker()->isolate()));
}
-// TODO(solanes, v8:7790): Remove base::Optional from the return type when
-// deleting serialization.
-base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // TODO(solanes): Change TryMakeRef to MakeRef when Map is moved to
- // kNeverSerialized.
- // TODO(solanes, v8:7790): Consider caching the result of the root map.
- return TryMakeRef(broker(), object()->FindRootMap(broker()->isolate()));
+ObjectRef MapRef::GetConstructor() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
- ObjectData* map_data = data()->AsMap()->FindRootMap();
- if (map_data != nullptr) {
- return MapRef(broker(), map_data);
+ return ObjectRef(broker(), data()->AsMap()->GetConstructor());
+}
+
+HeapObjectRef MapRef::GetBackPointer() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(
+ broker(), HeapObject::cast(object()->GetBackPointer()));
}
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
+ return HeapObjectRef(broker(), ObjectRef::data()->AsMap()->GetBackPointer());
}
bool JSTypedArrayRef::is_on_heap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. with
- // release-store.
- return object()->is_on_heap(kAcquireLoad);
- }
- return data()->AsJSTypedArray()->data_ptr();
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Underlying field written 1. during initialization or 2. with release-store.
+ return object()->is_on_heap(kAcquireLoad);
}
size_t JSTypedArrayRef::length() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return object()->length();
- }
- return data()->AsJSTypedArray()->length();
+ // Immutable after initialization.
+ return object()->length();
}
HeapObjectRef JSTypedArrayRef::buffer() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return MakeRef<HeapObject>(broker(), object()->buffer());
- }
- return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+ // Immutable after initialization.
+ return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. protected by
- // the is_on_heap release/acquire semantics (external_pointer store
- // happens-before base_pointer store, and this external_pointer load
- // happens-after base_pointer load).
- STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
- return object()->DataPtr();
- }
- return data()->AsJSTypedArray()->data_ptr();
+ // Underlying field written 1. during initialization or 2. protected by the
+ // is_on_heap release/acquire semantics (external_pointer store happens-before
+ // base_pointer store, and this external_pointer load happens-after
+ // base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
+ return object()->DataPtr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -2642,32 +2260,6 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-void NativeContextRef::Serialize(NotConcurrentInliningTag tag) {
- // TODO(jgruber): Disable visitation if should_access_heap() once all
- // NativeContext element refs can be created on background threads. Until
- // then, we *must* iterate them and create refs at serialization-time (even
- // though NativeContextRef itself is never-serialized).
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
-#define SERIALIZE_MEMBER(type, name) \
- { \
- ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \
- if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \
- member_data->AsMap()->instance_type())) { \
- member_data->AsMap()->SerializeConstructor(broker(), tag); \
- } \
- }
- BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
-#undef SERIALIZE_MEMBER
-
- for (int i = Context::FIRST_FUNCTION_MAP_INDEX;
- i <= Context::LAST_FUNCTION_MAP_INDEX; i++) {
- MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap();
- if (!InstanceTypeChecker::IsContext(member_data->instance_type())) {
- member_data->SerializeConstructor(broker(), tag);
- }
- }
-}
-
ScopeInfoRef NativeContextRef::scope_info() const {
// The scope_info is immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
@@ -2777,25 +2369,18 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
- *elements_ref.object(), map().elements_kind(), index);
-
- if (!maybe_element.has_value()) return {};
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
+ *elements_ref.object(), map().elements_kind(), index);
+ if (!maybe_element.has_value()) return {};
- base::Optional<ObjectRef> result =
- TryMakeRef(broker(), maybe_element.value());
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantElement(*this, index, *result);
- }
- return result;
- } else {
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- return TryMakeRef<Object>(broker(), element);
+ base::Optional<ObjectRef> result =
+ TryMakeRef(broker(), maybe_element.value());
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantElement(*this, index, *result);
}
+ return result;
}
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
@@ -2844,109 +2429,82 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
- broker(), *this, field_representation, index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDataProperty(
- *this, map(), field_representation, index, *result);
- }
- return result;
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
+ broker(), *this, field_representation, index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDataProperty(
+ *this, map(), field_representation, index, *result);
}
- ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
- broker(), field_representation, index, policy);
- return TryMakeRef<Object>(broker(), property);
+ return result;
}
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy) const {
+ InternalIndex index, CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result =
- GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDictionaryProperty(*this, index,
- *result);
- }
- return result;
+ base::Optional<ObjectRef> result =
+ GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDictionaryProperty(*this, index, *result);
}
- ObjectData* property =
- data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
- CHECK_NE(property, nullptr);
- return ObjectRef(broker(), property);
+ return result;
}
ObjectRef JSArrayRef::GetBoilerplateLength() const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
// - boilerplates are published into the feedback vector.
- return length_unsafe();
+ // These facts also mean we can expect a valid value.
+ return length_unsafe().value();
}
-ObjectRef JSArrayRef::length_unsafe() const {
+base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
+ return TryMakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
} else {
return ObjectRef{broker(), data()->AsJSArray()->length()};
}
}
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Note: we'd like to check `elements_ref == elements()` here, but due to
- // concurrency this may not hold. The code below must be able to deal with
- // concurrent `elements` modifications.
-
- // Due to concurrency, the kind read here may not be consistent with
- // `elements_ref`. The caller has to guarantee consistency at runtime by
- // other means (e.g. through a runtime equality check or a compilation
- // dependency).
- ElementsKind elements_kind = map().elements_kind();
-
- // We only inspect fixed COW arrays, which may only occur for fast
- // smi/objects elements kinds.
- if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
- DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
-
- // As the name says, the `length` read here is unsafe and may not match
- // `elements`. We rely on the invariant that any `length` change will
- // also result in an `elements` change to make this safe. The `elements`
- // consistency check in the caller thus also guards the value of `length`.
- ObjectRef length_ref = length_unsafe();
-
- // Likewise we only deal with smi lengths.
- if (!length_ref.IsSmi()) return {};
-
- base::Optional<Object> result =
- ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(),
- elements_kind, length_ref.AsSmi(), index);
- if (!result.has_value()) return {};
-
- return TryMakeRef(broker(), result.value());
- } else {
- DCHECK(!data_->should_access_heap());
- DCHECK(!broker()->is_concurrent_inlining());
+ FixedArrayBaseRef elements_ref, uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Note: we'd like to check `elements_ref == elements()` here, but due to
+ // concurrency this may not hold. The code below must be able to deal with
+ // concurrent `elements` modifications.
- // Just to clarify that `elements_ref` is not used on this path.
- // GetOwnElement accesses the serialized `elements` field on its own.
- USE(elements_ref);
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. The caller has to guarantee consistency at runtime by
+ // other means (e.g. through a runtime equality check or a compilation
+ // dependency).
+ ElementsKind elements_kind = map().elements_kind();
- if (!elements(kRelaxedLoad).value().map().IsFixedCowArrayMap()) return {};
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // consistency check in the caller thus also guards the value of `length`.
+ base::Optional<ObjectRef> length_ref = length_unsafe();
+
+ if (!length_ref.has_value()) return {};
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref->IsSmi()) return {};
+
+ base::Optional<Object> result = ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
+ length_ref->AsSmi(), index);
+ if (!result.has_value()) return {};
+
+ return TryMakeRef(broker(), result.value());
}
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3042,12 +2600,6 @@ NameRef DescriptorArrayRef::GetPropertyKey(
return result;
}
-ObjectRef DescriptorArrayRef::GetFieldType(
- InternalIndex descriptor_index) const {
- return MakeRef(broker(),
- Object::cast(object()->GetFieldType(descriptor_index)));
-}
-
base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
InternalIndex descriptor_index) const {
HeapObject heap_object;
@@ -3062,15 +2614,22 @@ base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
return TryMakeRef(broker(), heap_object);
}
+base::Optional<FeedbackVectorRef> FeedbackCellRef::feedback_vector() const {
+ ObjectRef contents = value();
+ if (!contents.IsFeedbackVector()) return {};
+ return contents.AsFeedbackVector();
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- base::Optional<FeedbackVectorRef> feedback_vector = value();
- if (!feedback_vector.has_value()) return {};
- return feedback_vector->shared_function_info();
+ base::Optional<FeedbackVectorRef> vector = feedback_vector();
+ if (!vector.has_value()) return {};
+ return vector->shared_function_info();
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- return MakeRef(broker(), object()->shared_function_info());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->shared_function_info());
}
bool NameRef::IsUniqueName() const {
@@ -3143,20 +2702,6 @@ Handle<T> TinyRef<T>::object() const {
HEAP_BROKER_OBJECT_LIST(V)
#undef V
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
-}
-
-bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) {
- return true;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker(), tag);
-}
-
#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
const { \
@@ -3174,26 +2719,40 @@ bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
return data()->AsJSFunction()->Name(); \
}
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector,
- JSFunctionData::kHasFeedbackVector)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map,
- JSFunctionData::kHasInitialMap)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype,
- JSFunctionData::kHasInstancePrototype)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(
+// Like JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C but only depend on the
+// field in question if its recorded value is "relevant". This is in order to
+// tolerate certain state changes during compilation, e.g. from "has no feedback
+// vector" (in which case we would simply do less optimization) to "has feedback
+// vector".
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
+ Result, Name, UsedField, RelevantValue) \
+ Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ Result const result = data()->AsJSFunction()->Name(); \
+ if (result == RelevantValue) { \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ } \
+ return result; \
+ }
+
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(bool, has_initial_map,
+ JSFunctionData::kHasInitialMap,
+ true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
+ bool, has_instance_prototype, JSFunctionData::kHasInstancePrototype, true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
bool, PrototypeRequiresRuntimeLookup,
- JSFunctionData::kPrototypeRequiresRuntimeLookup)
+ JSFunctionData::kPrototypeRequiresRuntimeLookup, false)
+
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map,
JSFunctionData::kInitialMap)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype,
JSFunctionData::kInstancePrototype)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell,
JSFunctionData::kFeedbackCell)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector,
- JSFunctionData::kFeedbackVector)
BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
@@ -3203,6 +2762,11 @@ CodeRef JSFunctionRef::code() const {
return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
}
+NativeContextRef JSFunctionRef::native_context() const {
+ return MakeRefAssumeMemoryFence(broker(),
+ context().object()->native_context());
+}
+
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (!object()->IsApiFunction()) return {};
@@ -3269,23 +2833,6 @@ void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(tag));
}
-void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Nothing to do.
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker(), tag);
- }
-}
-
-bool JSTypedArrayRef::serialized() const {
- if (data_->should_access_heap()) return true;
- if (broker()->is_concurrent_inlining()) return true;
- if (data_->AsJSTypedArray()->serialized()) return true;
- TRACE_BROKER_MISSING(broker(), "data for JSTypedArray " << this);
- return false;
-}
-
bool PropertyCellRef::Cache() const {
if (data_->should_access_heap()) return true;
CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
@@ -3293,18 +2840,6 @@ bool PropertyCellRef::Cache() const {
return data()->AsPropertyCell()->Cache(broker());
}
-void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- // CallHandlerInfo::data may still hold a serialized heap object, so we
- // have to make the broker aware of it.
- // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
- Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
- broker()->isolate());
- if (call_code->IsCallHandlerInfo()) {
- broker()->GetOrCreateData(Handle<CallHandlerInfo>::cast(call_code)->data());
- }
-}
-
bool NativeContextRef::GlobalIsDetached() const {
base::Optional<ObjectRef> proxy_proto =
global_proxy_object().map().prototype();
@@ -3312,14 +2847,15 @@ bool NativeContextRef::GlobalIsDetached() const {
}
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return GetPropertyCellFromHeap(broker(), name.object());
- }
-
- ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data(), policy);
- return TryMakeRef<PropertyCell>(broker(), property_cell_data);
+ NameRef const& name) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<PropertyCell> maybe_cell =
+ ConcurrentLookupIterator::TryGetPropertyCell(
+ broker()->isolate(), broker()->local_isolate_or_isolate(),
+ broker()->target_native_context().global_object().object(),
+ name.object());
+ if (!maybe_cell.has_value()) return {};
+ return TryMakeRef(broker(), *maybe_cell);
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
@@ -3347,13 +2883,11 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef BIMODAL_ACCESSOR_WITH_FLAG
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
-#undef IF_ACCESS_FROM_HEAP_WITH_FLAG
#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
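The rewritten GetOwnCowElement above reads several fields (elements kind, elements, length) that can each race with the mutator, and relies on the caller re-validating `elements` to make the unsafe `length` read tolerable. A minimal standalone sketch of that snapshot-then-validate shape, using a hypothetical ArraySnapshot type and std::optional in place of base::Optional (illustrative only, not V8 source):

#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical stand-in for the fields read above: a possibly inconsistent
// snapshot of a JSArray's backing store and its length.
struct ArraySnapshot {
  std::vector<int> elements;  // plays the role of the COW FixedArray
  int length;                 // plays the role of the unsafe `length` read
};

// Return the element only if the snapshot is internally consistent; any
// mismatch (negative or oversized length, out-of-range index) bails out,
// mirroring the `return {}` paths in GetOwnCowElement.
std::optional<int> TryGetOwnCowElement(const ArraySnapshot& snap, size_t index) {
  if (snap.length < 0) return std::nullopt;
  if (static_cast<size_t>(snap.length) > snap.elements.size()) return std::nullopt;
  if (index >= static_cast<size_t>(snap.length)) return std::nullopt;
  return snap.elements[index];
}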
diff --git a/chromium/v8/src/compiler/heap-refs.h b/chromium/v8/src/compiler/heap-refs.h
index d580671f6d3..7f737c0c263 100644
--- a/chromium/v8/src/compiler/heap-refs.h
+++ b/chromium/v8/src/compiler/heap-refs.h
@@ -55,8 +55,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
-enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
-
// Clarifies in function signatures that a method may only be called when
// concurrent inlining is disabled.
class NotConcurrentInliningTag final {
@@ -272,6 +270,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
+ friend class JSFunctionData;
friend class JSGlobalObjectData;
friend class JSGlobalObjectRef;
friend class JSHeapBroker;
@@ -395,9 +394,7 @@ class JSObjectRef : public JSReceiverRef {
// against inconsistency due to weak memory concurrency.
base::Optional<ObjectRef> GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// The direct-read implementation of the above, extracted into a helper since
// it's also called from compilation-dependency validation. This helper is
// guaranteed to not create new Ref instances.
@@ -412,16 +409,12 @@ class JSObjectRef : public JSReceiverRef {
// property at code finalization time.
base::Optional<ObjectRef> GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// Return the value of the dictionary property at {index} in the dictionary
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ InternalIndex index, CompilationDependencies* dependencies) const;
// When concurrent inlining is enabled, reads the elements through a direct
// relaxed read. This is to ease the transition to unserialized (or
@@ -451,12 +444,8 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- bool Serialize(NotConcurrentInliningTag tag);
-
- // TODO(neis): Make return types non-optional once JSFunction is no longer
- // fg-serialized.
- base::Optional<JSReceiverRef> bound_target_function() const;
- base::Optional<ObjectRef> bound_this() const;
+ JSReceiverRef bound_target_function() const;
+ ObjectRef bound_this() const;
FixedArrayRef bound_arguments() const;
};
@@ -474,8 +463,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ CodeRef code() const;
- bool has_feedback_vector(CompilationDependencies* dependencies) const;
bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
CompilationDependencies* dependencies) const;
@@ -484,12 +473,10 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
MapRef initial_map(CompilationDependencies* dependencies) const;
int InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const;
- FeedbackVectorRef feedback_vector(
- CompilationDependencies* dependencies) const;
FeedbackCellRef raw_feedback_cell(
CompilationDependencies* dependencies) const;
-
- CodeRef code() const;
+ base::Optional<FeedbackVectorRef> feedback_vector(
+ CompilationDependencies* dependencies) const;
};
class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
@@ -535,9 +522,6 @@ class ContextRef : public HeapObjectRef {
base::Optional<ObjectRef> get(int index) const;
};
-// TODO(jgruber): Don't serialize NativeContext fields once all refs can be
-// created concurrently.
-
#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
V(JSFunction, array_function) \
V(JSFunction, bigint_function) \
@@ -619,7 +603,6 @@ class DescriptorArrayRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_index) const;
};
@@ -629,13 +612,12 @@ class FeedbackCellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
Handle<FeedbackCell> object() const;
- base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- // TODO(mvstanton): Once we allow inlining of functions we didn't see
- // during serialization, we do need to ensure that any feedback vector
- // we read here has been fully initialized (ie, store-ordered into the
- // cell).
- base::Optional<FeedbackVectorRef> value() const;
+ ObjectRef value() const;
+
+ // Convenience wrappers around {value()}:
+ base::Optional<FeedbackVectorRef> feedback_vector() const;
+ base::Optional<SharedFunctionInfoRef> shared_function_info() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -729,6 +711,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
OddballType oddball_type() const;
+ bool CanInlineElementAccess() const;
+
// Note: Only returns a value if the requested elements kind matches the
// current kind, or if the current map is an unmodified JSArray initial map.
base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
@@ -752,19 +736,15 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
+ DescriptorArrayRef instance_descriptors() const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
- DescriptorArrayRef instance_descriptors() const;
-
- void SerializeRootMap(NotConcurrentInliningTag tag);
- base::Optional<MapRef> FindRootMap() const;
-
+ MapRef FindRootMap() const;
ObjectRef GetConstructor() const;
};
@@ -785,17 +765,10 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
bool is_signature_undefined() const;
bool accept_any_receiver() const;
- // The following returns true if the CallHandlerInfo is present.
- bool has_call_code() const;
-
- void SerializeCallCode(NotConcurrentInliningTag tag);
base::Optional<CallHandlerInfoRef> call_code() const;
ZoneVector<Address> c_functions() const;
ZoneVector<const CFunctionInfo*> c_signatures() const;
-
- HolderLookupResult LookupHolderOfExpectedType(
- MapRef receiver_map,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map);
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -821,12 +794,6 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
- ObjectRef get(int i) const;
-
- // As above but may fail if Ref construction is not possible (e.g. for
- // serialized types on the background thread).
- // TODO(jgruber): Remove once all Ref types are never-serialized or
- // background-serialized and can thus be created on background threads.
base::Optional<ObjectRef> TryGet(int i) const;
};
@@ -894,15 +861,14 @@ class JSArrayRef : public JSObjectRef {
// storage and {index} is known to be an own data property.
// Note the value returned by this function is only valid if we ensure at
// runtime that the backing store has not changed.
- base::Optional<ObjectRef> GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetOwnCowElement(FixedArrayBaseRef elements_ref,
+ uint32_t index) const;
// The `JSArray::length` property; not safe to use in general, but can be
// used in some special cases that guarantee a valid `length` value despite
- // concurrent reads.
- ObjectRef length_unsafe() const;
+ // concurrent reads. The result needs to be optional in case the
+ // return value was created too recently to pass the gc predicate.
+ base::Optional<ObjectRef> length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -918,22 +884,23 @@ class ScopeInfoRef : public HeapObjectRef {
ScopeInfoRef OuterScopeInfo() const;
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_simple_parameters) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count_without_receiver) \
+ V(bool, IsDontAdaptArguments) \
+ V(bool, has_simple_parameters) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
@@ -966,9 +933,7 @@ class StringRef : public NameRef {
// With concurrent inlining on, we return base::nullopt due to not being able
// to use LookupIterator in a thread-safe way.
- base::Optional<ObjectRef> GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(uint32_t index) const;
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
@@ -1002,10 +967,6 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
void* data_ptr() const;
-
- void Serialize(NotConcurrentInliningTag tag);
- bool serialized() const;
-
HeapObjectRef buffer() const;
};
@@ -1042,9 +1003,7 @@ class JSGlobalObjectRef : public JSObjectRef {
bool IsDetachedFrom(JSGlobalProxyRef const& proxy) const;
// Can be called even when there is no property cell for the given name.
- base::Optional<PropertyCellRef> GetPropertyCell(
- NameRef const& name, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name) const;
};
class JSGlobalProxyRef : public JSObjectRef {
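In the header above, FeedbackCellRef::value() now returns a plain ObjectRef, with feedback_vector() and shared_function_info() layered on top as optional-returning convenience wrappers. A standalone sketch of that wrapper shape, with hypothetical FeedbackCell/FeedbackVector stand-ins rather than the actual V8 classes:

#include <memory>
#include <optional>
#include <string>

// Hypothetical stand-ins: a cell whose value may or may not be a vector.
struct FeedbackVector { std::string shared_name; };
struct FeedbackCell {
  std::shared_ptr<FeedbackVector> value;  // may be absent
};

// Convenience wrapper: only succeeds when the cell's value is a vector.
std::optional<FeedbackVector> feedback_vector(const FeedbackCell& cell) {
  if (!cell.value) return std::nullopt;
  return *cell.value;
}

// Built on the wrapper above, matching shared_function_info() in the diff.
std::optional<std::string> shared_function_info(const FeedbackCell& cell) {
  std::optional<FeedbackVector> vector = feedback_vector(cell);
  if (!vector.has_value()) return std::nullopt;
  return vector->shared_name;
}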
diff --git a/chromium/v8/src/compiler/int64-lowering.cc b/chromium/v8/src/compiler/int64-lowering.cc
index 28eb30969c7..00930998dda 100644
--- a/chromium/v8/src/compiler/int64-lowering.cc
+++ b/chromium/v8/src/compiler/int64-lowering.cc
@@ -944,29 +944,31 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
- MachineType type = AtomicOpType(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
DefaultLowering(node, true);
- if (type == MachineType::Uint64()) {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ if (params.representation() == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairLoad(params.order()));
ReplaceNodeWithProjections(node);
} else {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
case IrOpcode::kWord64AtomicStore: {
DCHECK_EQ(5, node->InputCount());
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- if (rep == MachineRepresentation::kWord64) {
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ if (params.representation() == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* value = node->InputAt(2);
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairStore(params.order()));
} else {
DefaultLowering(node, true);
- NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
}
break;
}
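The Word64Atomic{Load,Store} lowering above turns a 64-bit access into a pair of 32-bit halves (the Word32AtomicPair* operators), with projections reassembling the loaded value. The word splitting itself amounts to the following, sketched standalone (illustrative only, not the V8 operators):

#include <cstdint>
#include <utility>

// Split a 64-bit value into the (low, high) 32-bit pair consumed by the
// pair-store operator, and rebuild a 64-bit value from the two projections
// produced by the pair-load operator.
std::pair<uint32_t, uint32_t> ToWord32Pair(uint64_t value) {
  return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32)};
}

uint64_t FromWord32Pair(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}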
diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc
index 3dcdc6a33ee..de8dcfacbab 100644
--- a/chromium/v8/src/compiler/js-call-reducer.cc
+++ b/chromium/v8/src/compiler/js-call-reducer.cc
@@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
TNode<HeapObject> elements =
LoadField<HeapObject>(AccessBuilder::ForJSObjectElements(), o);
TNode<Object> value = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical),
- elements, index);
+ AccessBuilder::ForFixedArrayElement(kind), elements, index);
return std::make_pair(index, value);
}
@@ -2099,7 +2098,8 @@ FrameState CreateArtificialFrameState(
FrameState PromiseConstructorFrameState(
const PromiseCtorFrameStateParams& params, CommonOperatorBuilder* common,
Graph* graph) {
- DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
+ DCHECK_EQ(1,
+ params.shared.internal_formal_parameter_count_without_receiver());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
@@ -3639,8 +3639,6 @@ Reduction JSCallReducer::ReduceCallApiFunction(
FunctionTemplateInfoRef function_template_info(
shared.function_template_info().value());
- if (!function_template_info.has_call_code()) return NoChange();
-
if (function_template_info.accept_any_receiver() &&
function_template_info.is_signature_undefined()) {
// We might be able to
@@ -3764,7 +3762,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(argc)));
node->ReplaceInput(3, receiver); // Update receiver input.
node->ReplaceInput(6 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -4039,7 +4038,8 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
return NoChange();
}
formal_parameter_count =
- MakeRef(broker(), shared).internal_formal_parameter_count();
+ MakeRef(broker(), shared)
+ .internal_formal_parameter_count_without_receiver();
}
if (type == CreateArgumentsType::kMappedArguments) {
@@ -4309,13 +4309,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, function.shared());
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
- base::Optional<ObjectRef> bound_this = function.bound_this();
- if (!bound_this.has_value()) return NoChange();
+ ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
- bound_this->IsNullOrUndefined()
+ bound_this.IsNullOrUndefined()
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
@@ -4336,9 +4332,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSCallNode::TargetIndex());
- NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(*bound_this),
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
@@ -4372,13 +4368,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, p.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- if (cell.shared_function_info().has_value()) {
- return ReduceJSCall(node, *cell.shared_function_info());
- } else {
+ base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ if (!shared.has_value()) {
TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
<< cell << " has no FeedbackVector");
return NoChange();
}
+ return ReduceJSCall(node, *shared);
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4455,9 +4451,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
- FeedbackCellRef feedback_cell =
- MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().has_value()) {
+ FeedbackCellRef feedback_cell = feedback_target.value().AsFeedbackCell();
+ // TODO(neis): This check seems unnecessary.
+ if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
Node* target_closure = effect =
@@ -5055,9 +5051,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
+ JSReceiverRef bound_target_function = function.bound_target_function();
FixedArrayRef bound_arguments = function.bound_arguments();
const int bound_arguments_length = bound_arguments.length();
@@ -5076,20 +5070,20 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
// Patch {node} to use [[BoundTargetFunction]]
// as new.target if {new_target} equals {target}.
if (target == new_target) {
node->ReplaceInput(n.NewTargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
} else {
node->ReplaceInput(
n.NewTargetIndex(),
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->ReferenceEqual(),
target, new_target),
- jsgraph()->Constant(*bound_target_function),
+ jsgraph()->Constant(bound_target_function),
new_target));
}
@@ -5956,9 +5950,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
Effect effect = n.effect();
Control control = n.control();
- // Optimize for the case where we simply clone the {receiver},
- // i.e. when the {start} is zero and the {end} is undefined
- // (meaning it will be set to {receiver}s "length" property).
+ // Optimize for the case where we simply clone the {receiver}, i.e. when the
+ // {start} is zero and the {end} is undefined (meaning it will be set to
+ // {receiver}s "length" property). This logic should be in sync with
+ // ReduceArrayPrototypeSlice (to a reasonable degree). This is because
+ // CloneFastJSArray produces arrays which are potentially COW. If there's a
+ // discrepancy, TF generates code which produces a COW array and then expects
+ // it to be non-COW (or the other way around) -> immediate deopt.
if (!NumberMatcher(start).Is(0) ||
!HeapObjectMatcher(end).Is(factory()->undefined_value())) {
return NoChange();
@@ -6373,9 +6371,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect = graph()->NewNode(string_access_operator, receiver,
- masked_index, effect, control);
+ index, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -6433,11 +6430,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
- Node* masked_position = graph()->NewNode(
- simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_position, etrue, if_true);
+ unsigned_position, etrue, if_true);
Node* search_first =
jsgraph()->Constant(str.GetFirstChar().value());
@@ -6488,10 +6483,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* value = effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
- effect, control);
+ Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, effect, control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
ReplaceWithValue(node, value, effect, control);
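Several hunks above drop the simplified()->PoisonIndex() nodes, so string element reads now use the bounds-checked index directly. For context, index poisoning is the general mitigation of masking an index after its bounds check so a misspeculated access cannot reach attacker-chosen memory; a generic sketch of that technique (not the V8 implementation that was removed):

#include <cstdint>

// Branchless masking: the mask is all-ones when index < length and all-zeros
// otherwise, so a misspeculated out-of-bounds access collapses to index 0.
uint32_t PoisonIndexSketch(uint32_t index, uint32_t length) {
  uint32_t mask = 0u - static_cast<uint32_t>(index < length);
  return index & mask;
}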
diff --git a/chromium/v8/src/compiler/js-context-specialization.cc b/chromium/v8/src/compiler/js-context-specialization.cc
index 02e5cb17107..36217ca13bb 100644
--- a/chromium/v8/src/compiler/js-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-context-specialization.cc
@@ -103,7 +103,16 @@ base::Optional<ContextRef> GetSpecializationContext(
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(node->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(node->op()));
if (object.IsContext()) return object.AsContext();
break;
}
@@ -231,7 +240,16 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
switch (context->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(context->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(context->op()));
if (object.IsContext()) {
return find_context(object.AsContext());
}
diff --git a/chromium/v8/src/compiler/js-create-lowering.cc b/chromium/v8/src/compiler/js-create-lowering.cc
index 414977eb7db..1b79b9d786c 100644
--- a/chromium/v8/src/compiler/js-create-lowering.cc
+++ b/chromium/v8/src/compiler/js-create-lowering.cc
@@ -197,11 +197,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kUnmappedArguments,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kUnmappedArguments,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -222,14 +222,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
- Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()));
+ Node* const rest_length = graph()->NewNode(simplified()->RestLength(
+ shared.internal_formal_parameter_count_without_receiver()));
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kRestParameter,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kRestParameter,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -332,7 +332,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- int start_index = shared.internal_formal_parameter_count();
+ int start_index =
+ shared.internal_formal_parameter_count_without_receiver();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -401,7 +402,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Allocate a register file.
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
- int parameter_count_no_receiver = shared.internal_formal_parameter_count();
+ int parameter_count_no_receiver =
+ shared.internal_formal_parameter_count_without_receiver();
int length = parameter_count_no_receiver +
shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
@@ -466,9 +468,10 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map,
- initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind)));
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind));
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Because CheckBounds performs implicit conversion from string to number, an
// additional CheckNumber is required to behave correctly for calls with a
@@ -525,8 +528,12 @@ Reduction JSCreateLowering::ReduceNewArray(
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
+
DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
@@ -566,8 +573,11 @@ Reduction JSCreateLowering::ReduceNewArray(
// Determine the appropriate elements kind.
DCHECK(IsFastElementsKind(elements_kind));
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -1479,7 +1489,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
// If there is no aliasing, the arguments object elements are not special in
// any way, we can just return an unmapped backing store instead.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return TryAllocateArguments(effect, control, frame_state);
}
@@ -1545,7 +1556,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return graph()->NewNode(
simplified()->NewArgumentsElements(
@@ -1699,7 +1711,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
- if (property_details.location() != kField) continue;
+ if (property_details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, property_details.kind());
if ((*max_properties)-- == 0) return {};
@@ -1713,7 +1725,6 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
Type::Any(),
MachineType::AnyTagged(),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
const_field_info};
// Note: the use of RawInobjectPropertyAt (vs. the higher-level
diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc
index bbc47e45add..08896e3f111 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.cc
+++ b/chromium/v8/src/compiler/js-generic-lowering.cc
@@ -586,7 +586,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
// between top of stack and JS arguments.
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
- Node* stub_arity = jsgraph()->Int32Constant(arity);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arity));
base::Optional<AllocationSiteRef> const site = p.site(broker());
Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
: jsgraph()->UndefinedConstant();
@@ -820,7 +820,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
@@ -843,7 +843,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* receiver = jsgraph()->UndefinedConstant();
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 0, stub_code);
@@ -906,7 +906,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
Node* receiver = jsgraph()->UndefinedConstant();
DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
node->RemoveInput(n.FeedbackVectorIndex());
@@ -930,7 +931,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
@@ -951,7 +952,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -1009,7 +1010,8 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
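The JSGenericLowering changes wrap every arity constant in JSParameterCount(...), matching the switch to argument counts that include the receiver slot, while SharedFunctionInfo now reports internal_formal_parameter_count_without_receiver(). Assuming the receiver occupies one extra slot (an assumption here; the actual constant and helper live elsewhere in V8), the relationship is simply:

// Assumed convention, sketched for illustration only: stub arity equals the
// JS argument count plus one slot for the receiver.
constexpr int kReceiverSlotCount = 1;  // hypothetical name for the extra slot

constexpr int ParameterCountWithReceiver(int argc_without_receiver) {
  return argc_without_receiver + kReceiverSlotCount;
}

static_assert(ParameterCountWithReceiver(0) == 1,
              "even a zero-argument call still passes the receiver");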
diff --git a/chromium/v8/src/compiler/js-heap-broker.cc b/chromium/v8/src/compiler/js-heap-broker.cc
index dc34bcae6de..0007a582a0d 100644
--- a/chromium/v8/src/compiler/js-heap-broker.cc
+++ b/chromium/v8/src/compiler/js-heap-broker.cc
@@ -50,12 +50,10 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
- minimorphic_property_access_infos_(zone()),
- typed_array_string_tags_(zone()) {
+ minimorphic_property_access_infos_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
@@ -220,20 +218,6 @@ bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
-bool CanInlineElementAccess(MapRef const& map) {
- if (!map.IsJSObjectMap()) return false;
- if (map.is_access_check_needed()) return false;
- if (map.has_indexed_interceptor()) return false;
- ElementsKind const elements_kind = map.elements_kind();
- if (IsFastElementsKind(elements_kind)) return true;
- if (IsTypedArrayElementsKind(elements_kind) &&
- elements_kind != BIGUINT64_ELEMENTS &&
- elements_kind != BIGINT64_ELEMENTS) {
- return true;
- }
- return false;
-}
-
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
: kind_(kind), slot_kind_(slot_kind) {}
@@ -423,7 +407,10 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
for (Handle<Map> map : group) {
- if (!MakeRef(broker, map).IsStringMap()) return false;
+ // We assume a memory fence because {map} was read earlier from
+ // the feedback vector and was store ordered on insertion into the
+ // vector.
+ if (!MakeRefAssumeMemoryFence(broker, map).IsStringMap()) return false;
}
}
return true;
@@ -880,11 +867,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (MapRef& map : maps) {
- if (!is_concurrent_inlining()) {
- map.SerializeRootMap(NotConcurrentInliningTag{this});
- }
-
- if (CanInlineElementAccess(map) &&
+ if (map.CanInlineElementAccess() &&
IsFastElementsKind(map.elements_kind()) &&
GetInitialFastElementsKind() != map.elements_kind()) {
possible_transition_targets.push_back(map.object());
@@ -992,9 +975,13 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
+ // We can assume a memory fence on {source.vector} because in production,
+ // the vector has already passed the gc predicate. Unit tests create
+ // FeedbackSource objects directly from handles, but they run on
+ // the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
- << MakeRef<Object>(this, source.vector));
+ << MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
}
return access_info;
diff --git a/chromium/v8/src/compiler/js-heap-broker.h b/chromium/v8/src/compiler/js-heap-broker.h
index 91b94bebb5f..bf9b9aaac09 100644
--- a/chromium/v8/src/compiler/js-heap-broker.h
+++ b/chromium/v8/src/compiler/js-heap-broker.h
@@ -117,7 +117,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
- bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
@@ -173,7 +172,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const* feedback);
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
- // TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
@@ -291,8 +289,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void IncrementTracingIndentation();
void DecrementTracingIndentation();
- RootIndexMap const& root_index_map() { return root_index_map_; }
-
// Locks {mutex} through the duration of this scope iff it is the first
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
@@ -389,8 +385,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void CollectArrayAndObjectPrototypes();
- PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -419,7 +413,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
Isolate* const isolate_;
- Zone* const zone_ = nullptr;
+ Zone* const zone_;
base::Optional<NativeContextRef> target_native_context_;
RefsMap* refs_;
RootIndexMap root_index_map_;
@@ -429,13 +423,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
- PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
@@ -446,8 +438,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
- ZoneVector<ObjectData*> typed_array_string_tags_;
-
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
@@ -460,7 +450,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// Likewise for boilerplate migrations.
int boilerplate_migration_mutex_depth_ = 0;
- static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
static constexpr uint32_t kMinimalRefsBucketCount = 8;
STATIC_ASSERT(base::bits::IsPowerOfTwo(kMinimalRefsBucketCount));
static constexpr uint32_t kInitialRefsBucketCount = 1024;
@@ -487,21 +476,6 @@ class V8_NODISCARD TraceScope {
JSHeapBroker* const broker_;
};
-#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
- optionally_something) \
- auto optionally_something_ = optionally_something; \
- if (!optionally_something_) \
- return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__); \
- something_var = *optionally_something_;
-
-class Reduction;
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line);
-
-// Miscellaneous definitions that should be moved elsewhere once concurrent
-// compilation is finished.
-bool CanInlineElementAccess(MapRef const& map);
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
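With ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING deleted in the js-heap-broker.h hunk above, call sites such as ReduceNewArray in js-create-lowering.cc now unwrap the optional by hand and return NoChange() themselves. The shape of that rewrite, as a standalone sketch with a hypothetical lookup function:

#include <optional>

// Hypothetical stand-in for MapRef::AsElementsKind: may fail to produce data.
std::optional<int> AsElementsKindSketch(int map, int kind) {
  if (kind < 0) return std::nullopt;
  return map + kind;
}

// Explicit unwrapping replaces the deleted macro: bail out (NoChange() in the
// real reducer) when the optional is empty, otherwise keep using the value.
bool ReduceNewArraySketch(int map, int kind) {
  std::optional<int> maybe_map = AsElementsKindSketch(map, kind);
  if (!maybe_map.has_value()) return false;
  int initial_map = maybe_map.value();
  return initial_map >= 0;
}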
diff --git a/chromium/v8/src/compiler/js-inlining-heuristic.cc b/chromium/v8/src/compiler/js-inlining-heuristic.cc
index 177f35c7a04..c6a223b600b 100644
--- a/chromium/v8/src/compiler/js-inlining-heuristic.cc
+++ b/chromium/v8/src/compiler/js-inlining-heuristic.cc
@@ -27,8 +27,40 @@ bool IsSmall(int const size) {
}
bool CanConsiderForInlining(JSHeapBroker* broker,
- SharedFunctionInfoRef const& shared,
- FeedbackVectorRef const& feedback_vector) {
+ FeedbackCellRef const& feedback_cell) {
+ base::Optional<FeedbackVectorRef> feedback_vector =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector.has_value()) {
+ TRACE("Cannot consider " << feedback_cell
+ << " for inlining (no feedback vector)");
+ return false;
+ }
+ SharedFunctionInfoRef shared = feedback_vector->shared_function_info();
+
+ if (!shared.HasBytecodeArray()) {
+ TRACE("Cannot consider " << shared << " for inlining (no bytecode)");
+ return false;
+ }
+ // Ensure we have a persistent handle to the bytecode in order to avoid
+ // flushing it during the remaining compilation.
+ shared.GetBytecodeArray();
+
+ // Read feedback vector again in case it got flushed before we were able to
+ // prevent flushing above.
+ base::Optional<FeedbackVectorRef> feedback_vector_again =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector_again.has_value()) {
+ TRACE("Cannot consider " << shared << " for inlining (no feedback vector)");
+ return false;
+ }
+ if (!feedback_vector_again->equals(*feedback_vector)) {
+ // The new feedback vector likely contains lots of uninitialized slots, so
+ // it doesn't make much sense to inline this function now.
+ TRACE("Not considering " << shared
+ << " for inlining (feedback vector changed)");
+ return false;
+ }
+
SharedFunctionInfo::Inlineability inlineability = shared.GetInlineability();
if (inlineability != SharedFunctionInfo::kIsInlineable) {
TRACE("Cannot consider "
@@ -36,22 +68,20 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- DCHECK(shared.HasBytecodeArray());
- TRACE("Considering " << shared << " for inlining with " << feedback_vector);
+ TRACE("Considering " << shared << " for inlining with " << *feedback_vector);
return true;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- if (!function.has_feedback_vector(broker->dependencies())) {
- TRACE("Cannot consider " << function
- << " for inlining (no feedback vector)");
- return false;
- }
-
- return CanConsiderForInlining(
- broker, function.shared(),
- function.feedback_vector(broker->dependencies()));
+ FeedbackCellRef feedback_cell =
+ function.raw_feedback_cell(broker->dependencies());
+ bool const result = CanConsiderForInlining(broker, feedback_cell);
+ if (result) {
+ CHECK(
+ function.shared().equals(feedback_cell.shared_function_info().value()));
+ }
+ return result;
}
} // namespace
@@ -65,8 +95,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
HeapObjectMatcher m(callee);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
- out.functions[0] = m.Ref(broker()).AsJSFunction();
- JSFunctionRef function = out.functions[0].value();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ out.functions[0] = function;
if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
out.num_functions = 1;
@@ -98,10 +128,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
- out.shared_info = shared_info;
- if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
}
out.num_functions = 1;
return out;
@@ -109,13 +138,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsJSCreateClosure()) {
DCHECK(!out.functions[0].has_value());
JSCreateClosureNode n(callee);
- CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info = p.shared_info(broker());
- out.shared_info = shared_info;
- if (feedback_cell.value().has_value() &&
- CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
+ CHECK(out.shared_info->equals(n.Parameters().shared_info(broker())));
}
out.num_functions = 1;
return out;
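CanConsiderForInlining above guards against feedback/bytecode flushing by reading the feedback vector, pinning the bytecode, then re-reading the vector and insisting it is unchanged. The control flow of that double read, sketched standalone (a single-threaded illustration of the logic only; FeedbackCell/FeedbackVector here are hypothetical stand-ins, not the thread-safe V8 machinery):

#include <memory>
#include <optional>

struct FeedbackVector { int slot_count; };
struct FeedbackCell { std::shared_ptr<FeedbackVector> vector; };

std::optional<FeedbackVector> TryReadStableVector(const FeedbackCell& cell) {
  std::shared_ptr<FeedbackVector> first = cell.vector;  // first read
  if (!first) return std::nullopt;                      // nothing to inline with
  // ...pin derived data here (the bytecode array in the real code)...
  std::shared_ptr<FeedbackVector> again = cell.vector;  // second read
  if (again.get() != first.get()) return std::nullopt;  // changed in between
  return *first;
}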
diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc
index a17a43ecd21..b2e012d8c46 100644
--- a/chromium/v8/src/compiler/js-inlining.cc
+++ b/chromium/v8/src/compiler/js-inlining.cc
@@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
- if (!function.has_feedback_vector(broker()->dependencies())) {
+ if (!function.feedback_vector(broker()->dependencies()).has_value()) {
return base::nullopt;
}
@@ -355,7 +355,7 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.has_feedback_vector(broker()->dependencies()));
+ CHECK(function.feedback_vector(broker()->dependencies()).has_value());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
@@ -472,11 +472,24 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Determine the call target.
base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
if (!shared_info.has_value()) return NoChange();
- DCHECK(shared_info->IsInlineable());
SharedFunctionInfoRef outer_shared_info =
MakeRef(broker(), info_->shared_info());
+ SharedFunctionInfo::Inlineability inlineability =
+ shared_info->GetInlineability();
+ if (inlineability != SharedFunctionInfo::kIsInlineable) {
+ // The function is no longer inlineable. The only way this can happen is if
+ // the function had its optimization disabled in the meantime, e.g. because
+ // another optimization job failed too often.
+ CHECK_EQ(inlineability, SharedFunctionInfo::kHasOptimizationDisabled);
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because it had its optimization disabled.");
+ return NoChange();
+ }
+ // NOTE: Even though we bailout in the kHasOptimizationDisabled case above, we
+ // won't notice if the function's optimization is disabled after this point.
+
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
!IsConstructable(shared_info->kind())) {
@@ -709,7 +722,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert argument adaptor frame if required. The callees formal parameter
// count have to match the number of arguments passed
// to the call.
- int parameter_count = shared_info->internal_formal_parameter_count();
+ int parameter_count =
+ shared_info->internal_formal_parameter_count_without_receiver();
DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc
index e03e0d41a31..d100fd91af9 100644
--- a/chromium/v8/src/compiler/js-native-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-native-context-specialization.cc
@@ -230,8 +230,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
broker(),
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
DCHECK(shared.is_compiled());
- int register_count = shared.internal_formal_parameter_count() +
- shared.GetBytecodeArray().register_count();
+ int register_count =
+ shared.internal_formal_parameter_count_without_receiver() +
+ shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
@@ -617,15 +618,11 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (bound_target_function.has_value()) return NoChange();
-
Node* feedback = jsgraph()->UndefinedConstant();
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -970,6 +967,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
break;
}
case PropertyCellType::kUndefined:
+ case PropertyCellType::kInTransition:
UNREACHABLE();
}
}
@@ -1635,8 +1633,7 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
maps->erase(std::remove_if(maps->begin(), maps->end(),
[root_map](const MapRef& map) {
return map.is_abandoned_prototype_map() ||
- (map.FindRootMap().has_value() &&
- !map.FindRootMap()->equals(*root_map));
+ !map.FindRootMap().equals(*root_map);
}),
maps->end());
}
@@ -1747,16 +1744,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- // Check if we have the necessary data for building element accesses.
- for (ElementAccessInfo const& access_info : access_infos) {
- if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() && !typed_array->serialized()) {
- return NoChange();
- }
- }
-
// Check for the monomorphic case.
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_infos.size() == 1) {
@@ -2256,10 +2243,6 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.has_call_code()) {
- return nullptr;
- }
-
if (!function_template_info.call_code().has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
@@ -2449,7 +2432,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
@@ -2483,7 +2465,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
storage = effect =
@@ -2789,10 +2770,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (situation == kHandleOOB_SmiCheckDone) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -2980,10 +2959,9 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
- ElementAccess element_access = {
- kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
// Access the actual element.
if (keyed_mode.access_mode() == AccessMode::kLoad) {
@@ -3003,10 +2981,8 @@ JSNativeContextSpecialization::BuildElementAccess(
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -3289,9 +3265,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, *control);
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// Do a real bounds check against {length}. This is in order to protect
@@ -3302,10 +3276,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero |
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* vtrue = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, etrue, if_true);
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3323,12 +3295,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero),
index, length, *effect, *control);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
-
// Return the character from the {receiver} as single character string.
- Node* value = *effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, *effect, *control);
+ Node* value = *effect = graph()->NewNode(
+ simplified()->StringCharCodeAt(), receiver, index, *effect, *control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}
@@ -3465,10 +3434,7 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
base::Optional<MapRef> initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- if (!initial_map->FindRootMap().has_value()) {
- return base::nullopt;
- }
- DCHECK(initial_map->equals(*initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap()));
return *initial_map;
}
}
diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.cc b/chromium/v8/src/compiler/js-type-hint-lowering.cc
index 956f13d7f91..38c523596c2 100644
--- a/chromium/v8/src/compiler/js-type-hint-lowering.cc
+++ b/chromium/v8/src/compiler/js-type-hint-lowering.cc
@@ -153,6 +153,7 @@ class JSSpeculativeBinopBuilder final {
}
const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ DCHECK(jsgraph()->machine()->Is64());
switch (op_->opcode()) {
case IrOpcode::kJSAdd:
return simplified()->SpeculativeBigIntAdd(hint);
@@ -206,6 +207,7 @@ class JSSpeculativeBinopBuilder final {
}
Node* TryBuildBigIntBinop() {
+ DCHECK(jsgraph()->machine()->Is64());
BigIntOperationHint hint;
if (GetBinaryBigIntOperationHint(&hint)) {
const Operator* op = SpeculativeBigIntOp(hint);
@@ -321,10 +323,13 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
if (!node) {
- if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
- const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
- BigIntOperationHint::kBigInt);
- node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
+ const Operator* op =
+ jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
}
}
break;
@@ -403,8 +408,10 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
if (op->opcode() == IrOpcode::kJSAdd ||
op->opcode() == IrOpcode::kJSSubtract) {
- if (Node* node = b.TryBuildBigIntBinop()) {
- return LoweringResult::SideEffectFree(node, node, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
}
}
break;
diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc
index e986ef1baf6..8d67e417512 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.cc
+++ b/chromium/v8/src/compiler/js-typed-lowering.cc
@@ -998,9 +998,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
@@ -1595,7 +1595,8 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1633,7 +1634,8 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
STATIC_ASSERT(JSConstructNode::NewTargetIndex() == 1);
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1663,7 +1665,8 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1750,8 +1753,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- int formal_count = shared->internal_formal_parameter_count();
- if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
+ int formal_count =
+ shared->internal_formal_parameter_count_without_receiver();
+ // TODO(v8:11112): Once the sentinel is always 0, the check against
+ // IsDontAdaptArguments() can be removed.
+ if (!shared->IsDontAdaptArguments() && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
// Underapplication. Massage the arguments to match the expected number of
// arguments.
@@ -1763,7 +1769,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), formal_count + 2, new_target);
node->InsertInput(graph()->zone(), formal_count + 3,
- jsgraph()->Constant(arity));
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
@@ -1786,13 +1792,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), arity + 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity,
@@ -1811,7 +1819,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
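
Several hunks above swap jsgraph()->Constant(arity) for jsgraph()->Constant(JSParameterCount(arity)). A tiny sketch of the counting convention this implies, assuming the helper simply adds the receiver slot to a receiver-less argument count (the with/without_receiver renames elsewhere in this patch point the same way); the constant and function below are invented for the sketch:

constexpr int kReceiverSlots = 1;  // assumption: argc now includes the receiver

constexpr int ParameterCountWithReceiver(int count_without_receiver) {
  return count_without_receiver + kReceiverSlots;
}

static_assert(ParameterCountWithReceiver(0) == 1, "receiver-only call");
static_assert(ParameterCountWithReceiver(2) == 3, "two arguments plus receiver");
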
diff --git a/chromium/v8/src/compiler/linkage.cc b/chromium/v8/src/compiler/linkage.cc
index fac24e802d8..2197fe6a65d 100644
--- a/chromium/v8/src/compiler/linkage.cc
+++ b/chromium/v8/src/compiler/linkage.cc
@@ -208,6 +208,18 @@ int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
UNREACHABLE();
}
+void CallDescriptor::ComputeParamCounts() const {
+ gp_param_count_ = 0;
+ fp_param_count_ = 0;
+ for (size_t i = 0; i < ParameterCount(); ++i) {
+ if (IsFloatingPoint(GetParameterType(i).representation())) {
+ ++fp_param_count_.value();
+ } else {
+ ++gp_param_count_.value();
+ }
+ }
+}
+
CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info) {
#if V8_ENABLE_WEBASSEMBLY
@@ -219,9 +231,10 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo shared = info->closure()->shared();
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + shared.internal_formal_parameter_count(),
- CallDescriptor::kCanUseRoots);
+ return GetJSCallDescriptor(
+ zone, info->is_osr(),
+ shared.internal_formal_parameter_count_with_receiver(),
+ CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
}
diff --git a/chromium/v8/src/compiler/linkage.h b/chromium/v8/src/compiler/linkage.h
index 8b33444b294..d157b44e031 100644
--- a/chromium/v8/src/compiler/linkage.h
+++ b/chromium/v8/src/compiler/linkage.h
@@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
- // Use retpoline for this call if indirect.
- kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 6,
- kCallerSavedRegisters = 1u << 7,
+ kFixedTargetRegister = 1u << 5,
+ kCallerSavedRegisters = 1u << 6,
// The kCallerSavedFPRegisters only matters (and set) when the more general
// flag for kCallerSavedRegisters above is also set.
- kCallerSavedFPRegisters = 1u << 8,
+ kCallerSavedFPRegisters = 1u << 7,
// Tail calls for tier up are special (in fact they are different enough
// from normal tail calls to warrant a dedicated opcode; but they also have
// enough similar aspects that reusing the TailCall opcode is pragmatic).
@@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
//
// In other words, behavior is identical to a jmp instruction prior to caller
// frame construction.
- kIsTailCallForTierUp = 1u << 9,
+ kIsTailCallForTierUp = 1u << 8,
+
+ // AIX has a function descriptor by default but it can be disabled for a
+ // certain CFunction call (only used for Kind::kCallAddress).
+ kNoFunctionDescriptor = 1u << 9,
// Flags past here are *not* encoded in InstructionCode and are thus not
// accessible from the code generator. See also
// kFlagsBitsEncodedInInstructionCode.
-
- // AIX has a function descriptor by default but it can be disabled for a
- // certain CFunction call (only used for Kind::kCallAddress).
- kNoFunctionDescriptor = 1u << 10,
};
using Flags = base::Flags<Flag>;
@@ -307,9 +305,27 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of return values from this call.
size_t ReturnCount() const { return location_sig_->return_count(); }
- // The number of C parameters to this call.
+ // The number of C parameters to this call. The following invariant
+ // should hold true:
+ // ParameterCount() == GPParameterCount() + FPParameterCount()
size_t ParameterCount() const { return location_sig_->parameter_count(); }
+ // The number of general purpose C parameters to this call.
+ size_t GPParameterCount() const {
+ if (!gp_param_count_) {
+ ComputeParamCounts();
+ }
+ return gp_param_count_.value();
+ }
+
+ // The number of floating point C parameters to this call.
+ size_t FPParameterCount() const {
+ if (!fp_param_count_) {
+ ComputeParamCounts();
+ }
+ return fp_param_count_.value();
+ }
+
// The number of stack parameter slots to the call.
size_t ParameterSlotCount() const { return param_slot_count_; }
@@ -419,6 +435,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
}
private:
+ void ComputeParamCounts() const;
+
friend class Linkage;
const Kind kind_;
@@ -436,6 +454,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const Flags flags_;
const StackArgumentOrder stack_order_;
const char* const debug_name_;
+
+ mutable base::Optional<size_t> gp_param_count_;
+ mutable base::Optional<size_t> fp_param_count_;
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
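
The GPParameterCount()/FPParameterCount() accessors above compute both counters lazily in a single pass and cache them in mutable optionals, preserving ParameterCount() == GPParameterCount() + FPParameterCount(). A self-contained sketch of that memoization pattern, using standard-library types in place of base::Optional:

#include <cstddef>
#include <optional>
#include <vector>

class ParamCounts {
 public:
  explicit ParamCounts(std::vector<bool> is_fp) : is_fp_(std::move(is_fp)) {}

  size_t ParameterCount() const { return is_fp_.size(); }
  size_t GPParameterCount() const {
    if (!gp_count_) Compute();
    return *gp_count_;
  }
  size_t FPParameterCount() const {
    if (!fp_count_) Compute();
    return *fp_count_;
  }

 private:
  void Compute() const {
    size_t gp = 0, fp = 0;
    for (bool fp_param : is_fp_) (fp_param ? fp : gp)++;
    gp_count_ = gp;
    fp_count_ = fp;
  }
  std::vector<bool> is_fp_;                 // true for floating-point params
  mutable std::optional<size_t> gp_count_;  // filled on first query
  mutable std::optional<size_t> fp_count_;
};
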
diff --git a/chromium/v8/src/compiler/loop-analysis.cc b/chromium/v8/src/compiler/loop-analysis.cc
index e184534ed78..7b660856b72 100644
--- a/chromium/v8/src/compiler/loop-analysis.cc
+++ b/chromium/v8/src/compiler/loop-analysis.cc
@@ -5,12 +5,17 @@
#include "src/compiler/loop-analysis.h"
#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -581,12 +586,24 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
loop_header);
// All uses are outside the loop, do nothing.
break;
- case IrOpcode::kCall:
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
// Call nodes are considered to have unbounded size, i.e. >max_size.
+ // An exception is the call to the stack guard builtin at the beginning
+ // of many loops.
return nullptr;
+ case IrOpcode::kCall: {
+ Node* callee = node->InputAt(0);
+ if (callee->opcode() == IrOpcode::kRelocatableInt32Constant ||
+ callee->opcode() == IrOpcode::kRelocatableInt64Constant) {
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard) {
+ return nullptr;
+ }
+ }
+ V8_FALLTHROUGH;
+ }
default:
for (Node* use : node->uses()) {
if (visited->count(use) == 0) queue.push_back(use);
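
The kCall case above stops treating every call as "unbounded size": a call whose callee is a relocatable constant naming the wasm stack guard is tolerated, while any other constant callee still disqualifies the loop. A standalone sketch of that predicate, with stand-in types invented for the sketch:

#include <cstdint>

enum class Opcode {
  kRelocatableInt32Constant,
  kRelocatableInt64Constant,
  kOther
};
constexpr int64_t kStackGuardBuiltin = 1;  // stand-in for the real builtin id

struct CalleeNode {
  Opcode opcode;
  int64_t value;  // meaningful only for the relocatable-constant opcodes
};

// Mirrors the condition in the hunk: only a relocatable-constant callee that
// is not the stack guard makes the small-loop analysis give up.
bool DisqualifiesSmallLoop(const CalleeNode& callee) {
  bool is_constant_callee =
      callee.opcode == Opcode::kRelocatableInt32Constant ||
      callee.opcode == Opcode::kRelocatableInt64Constant;
  return is_constant_callee && callee.value != kStackGuardBuiltin;
}
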
diff --git a/chromium/v8/src/compiler/loop-unrolling.cc b/chromium/v8/src/compiler/loop-unrolling.cc
index 973bb7af193..357b17a3ec6 100644
--- a/chromium/v8/src/compiler/loop-unrolling.cc
+++ b/chromium/v8/src/compiler/loop-unrolling.cc
@@ -35,11 +35,11 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
NodeVector copies(tmp_zone);
NodeCopier copier(graph, copied_size, &copies, unrolling_count);
- {
- copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
- base::make_iterator_range(loop->begin(), loop->end()),
- source_positions, node_origins);
- }
+ source_positions->AddDecorator();
+ copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
+ base::make_iterator_range(loop->begin(), loop->end()),
+ source_positions, node_origins);
+ source_positions->RemoveDecorator();
#define COPY(node, n) copier.map(node, n)
#define FOREACH_COPY_INDEX(i) for (uint32_t i = 0; i < unrolling_count; i++)
diff --git a/chromium/v8/src/compiler/machine-graph-verifier.cc b/chromium/v8/src/compiler/machine-graph-verifier.cc
index 88679283d94..31f05266799 100644
--- a/chromium/v8/src/compiler/machine-graph-verifier.cc
+++ b/chromium/v8/src/compiler/machine-graph-verifier.cc
@@ -121,10 +121,14 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicLoadParametersOf(node->op())
+ .representation()
+ .representation());
+ break;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@@ -154,8 +158,8 @@ class MachineRepresentationInferrer {
}
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord64AtomicStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ representation_vector_[node->id()] = PromoteRepresentation(
+ AtomicStoreParametersOf(node->op()).representation());
break;
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord32AtomicPairStore:
@@ -206,15 +210,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kTaggedPoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord32;
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord64;
- break;
case IrOpcode::kCompressedHeapConstant:
representation_vector_[node->id()] =
MachineRepresentation::kCompressedPointer;
@@ -394,14 +391,6 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord32);
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord64);
- break;
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
if (COMPRESS_POINTERS_BOOL) {
@@ -410,9 +399,6 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
}
break;
- case IrOpcode::kTaggedPoisonOnSpeculation:
- CheckValueInputIsTagged(node, 0);
- break;
case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat64ToFloat32:
@@ -557,7 +543,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
@@ -566,7 +552,6 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
- case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -605,9 +590,12 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
- node->opcode() == IrOpcode::kStore &&
- IsAnyTagged(
- StoreRepresentationOf(node->op()).representation())) {
+ ((node->opcode() == IrOpcode::kStore &&
+ IsAnyTagged(StoreRepresentationOf(node->op())
+ .representation())) ||
+ (node->opcode() == IrOpcode::kWord32AtomicStore &&
+ IsAnyTagged(AtomicStoreParametersOf(node->op())
+ .representation())))) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
CheckValueInputIsTagged(node, 2);
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc
index 33d58c854ba..db137dfeb49 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.cc
+++ b/chromium/v8/src/compiler/machine-operator-reducer.cc
@@ -947,6 +947,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
return ReduceWord64Comparisons(node);
}
+ case IrOpcode::kFloat32Select:
+ case IrOpcode::kFloat64Select:
+ case IrOpcode::kWord32Select:
+ case IrOpcode::kWord64Select: {
+ Int32Matcher match(node->InputAt(0));
+ if (match.HasResolvedValue()) {
+ if (match.Is(0)) {
+ return Replace(node->InputAt(2));
+ } else {
+ return Replace(node->InputAt(1));
+ }
+ }
+ break;
+ }
default:
break;
}
@@ -1240,17 +1254,12 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
- MachineRepresentation rep;
- int value_input;
- if (nm.IsStore()) {
- rep = StoreRepresentationOf(node->op()).representation();
- value_input = 2;
- } else {
- DCHECK(nm.IsUnalignedStore());
- rep = UnalignedStoreRepresentationOf(node->op());
- value_input = 2;
- }
+ DCHECK(nm.IsStore() || nm.IsUnalignedStore());
+ MachineRepresentation rep =
+ nm.IsStore() ? StoreRepresentationOf(node->op()).representation()
+ : UnalignedStoreRepresentationOf(node->op());
+ const int value_input = 2;
Node* const value = node->InputAt(value_input);
switch (value->opcode()) {
@@ -2061,7 +2070,6 @@ bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
} // namespace
-
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||
IrOpcode::kFloat64LessThan == node->opcode() ||
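
The new Select cases above fold Float32/Float64/Word32/Word64 selects whose condition is a known 32-bit constant: zero picks the third input, anything else the second. A small sketch of the same folding rule:

#include <cstdint>
#include <optional>

// std::nullopt mirrors "no reduction"; otherwise the select collapses to one
// of its value inputs.
template <typename T>
std::optional<T> TryFoldSelect(std::optional<int32_t> condition, T if_true,
                               T if_false) {
  if (!condition.has_value()) return std::nullopt;
  return (*condition == 0) ? if_false : if_true;
}

// TryFoldSelect<double>(0, 1.5, 2.5) yields 2.5; an unresolved condition
// yields std::nullopt and the Select node is left untouched.
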
diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc
index 411c6d4cb32..e2d1686d5d0 100644
--- a/chromium/v8/src/compiler/machine-operator.cc
+++ b/chromium/v8/src/compiler/machine-operator.cc
@@ -32,6 +32,41 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
+bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return lhs.store_representation() == rhs.store_representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicStoreParameters params) {
+ return base::hash_combine(hash_value(params.store_representation()),
+ params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
+ return os << params.store_representation() << ", " << params.order();
+}
+
+bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return lhs.representation() == rhs.representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicLoadParameters params) {
+ return base::hash_combine(params.representation(), params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
+ return os << params.representation() << ", " << params.order();
+}
+
size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
@@ -121,21 +156,29 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kWord32AtomicLoad == op->opcode() ||
- IrOpcode::kWord64AtomicLoad == op->opcode() ||
- IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
- IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
+AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kWord64AtomicLoad == op->opcode());
+ return OpParameter<AtomicLoadParameters>(op);
+}
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
+AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+ IrOpcode::kWord64AtomicStore == op->opcode());
+ return OpParameter<AtomicStoreParameters>(op);
+}
+
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -182,12 +225,6 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
return OpParameter<StackSlotRepresentation>(op);
}
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
- return OpParameter<MachineRepresentation>(op);
-}
-
MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -650,6 +687,30 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32Zero) \
V(S128Load64Zero)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_TYPE_LIST(V)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#else
+
+#define ATOMIC_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
@@ -665,6 +726,28 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged)
+
+#else
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged) \
+ V(kCompressedPointer) \
+ V(kCompressed)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@@ -831,13 +914,6 @@ struct MachineOperatorGlobalCache {
Operator::kEliminatable, "Load", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -861,7 +937,6 @@ struct MachineOperatorGlobalCache {
0, 0, 1, 0, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
@@ -976,55 +1051,63 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-#define ATOMIC_LOAD(Type) \
- struct Word32AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word32AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word32SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word32SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_LOAD(Type) \
- struct Word64AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word64AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word64SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word64SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
- struct Word32AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word32AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word32SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word32SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_STORE(Type) \
- struct Word64AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word64AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word64SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word64SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
@@ -1084,21 +1167,23 @@ struct MachineOperatorGlobalCache {
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
- struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairLoadOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
+ AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-
- struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
+
+ struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairStoreOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1,
+ 0, AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+ Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
@@ -1157,36 +1242,12 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct TaggedPoisonOnSpeculation : public Operator {
- TaggedPoisonOnSpeculation()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ struct AbortCSADcheckOperator : public Operator {
+ AbortCSADcheckOperator()
+ : Operator(IrOpcode::kAbortCSADcheck, Operator::kNoThrow,
+ "AbortCSADcheck", 1, 1, 1, 0, 1, 0) {}
};
- TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-
- struct Word32PoisonOnSpeculation : public Operator {
- Word32PoisonOnSpeculation()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-
- struct Word64PoisonOnSpeculation : public Operator {
- Word64PoisonOnSpeculation()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-
- struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
- };
- AbortCSAAssertOperator kAbortCSAAssert;
+ AbortCSADcheckOperator kAbortCSADcheck;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1366,16 +1427,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kPoisonedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -1575,8 +1626,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return &cache_.kAbortCSAAssert;
+const Operator* MachineOperatorBuilder::AbortCSADcheck() {
+ return &cache_.kAbortCSADcheck;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
@@ -1592,23 +1643,47 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord32AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_TYPE_LIST(LOAD)
+ ATOMIC_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord32AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC_REPRESENTATION_LIST(STORE)
+ ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
@@ -1685,24 +1760,49 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord64AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstLoad##Type; \
+ }
+ ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_U64_TYPE_LIST(LOAD)
+ ATOMIC64_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord64AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstStore##kRep; \
+ }
+ ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
+ ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
+
UNREACHABLE();
}
@@ -1777,12 +1877,24 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return &cache_.kWord32AtomicPairLoad;
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairLoad;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return &cache_.kWord32AtomicPairStore;
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairStore;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@@ -1813,18 +1925,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \
@@ -1918,8 +2018,12 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
#undef ATOMIC_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
+#undef ATOMIC_TAGGED_TYPE_LIST
+#undef ATOMIC64_TAGGED_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC_TAGGED_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
+#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
#undef LOAD_TRANSFORM_LIST
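
The rewritten Word32/64AtomicLoad and ...AtomicStore builders above keep the common seq_cst operators in the static cache and fall back to zone-allocating a parameterized operator for any other memory order. A simplified standalone sketch of that cache-the-common-case strategy, with stand-in types:

#include <memory>
#include <vector>

enum class Order { kSeqCst, kAcqRel };
enum class Rep { kWord8, kWord16, kWord32 };

struct Op {
  Rep rep;
  Order order;
};

class OpBuilder {
 public:
  const Op* AtomicLoad(Rep rep, Order order) {
    // Common case: hand out the statically cached seq_cst operator.
    if (order == Order::kSeqCst) return &cache_[static_cast<int>(rep)];
    // Anything else: allocate a one-off operator owned by the "zone".
    zone_.push_back(std::make_unique<Op>(Op{rep, order}));
    return zone_.back().get();
  }

 private:
  Op cache_[3] = {{Rep::kWord8, Order::kSeqCst},
                  {Rep::kWord16, Order::kSeqCst},
                  {Rep::kWord32, Order::kSeqCst}};
  std::vector<std::unique_ptr<Op>> zone_;  // stands in for the compiler zone
};
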
diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h
index 0ee3649ad0c..493ea08ac14 100644
--- a/chromium/v8/src/compiler/machine-operator.h
+++ b/chromium/v8/src/compiler/machine-operator.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
@@ -50,6 +51,32 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
+// order.
+class AtomicLoadParameters final {
+ public:
+ AtomicLoadParameters(LoadRepresentation representation,
+ AtomicMemoryOrder order)
+ : representation_(representation), order_(order) {}
+
+ LoadRepresentation representation() const { return representation_; }
+ AtomicMemoryOrder order() const { return order_; }
+
+ private:
+ LoadRepresentation representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
+bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
+
+size_t hash_value(AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
enum class MemoryAccessKind {
kNormal,
kUnaligned,
@@ -92,6 +119,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE bool operator==(LoadTransformParameters,
+ LoadTransformParameters);
+bool operator!=(LoadTransformParameters, LoadTransformParameters);
+
struct LoadLaneParameters {
MemoryAccessKind kind;
LoadRepresentation rep;
@@ -131,6 +162,43 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
+class AtomicStoreParameters final {
+ public:
+ AtomicStoreParameters(MachineRepresentation representation,
+ WriteBarrierKind write_barrier_kind,
+ AtomicMemoryOrder order)
+ : store_representation_(representation, write_barrier_kind),
+ order_(order) {}
+
+ MachineRepresentation representation() const {
+ return store_representation_.representation();
+ }
+ WriteBarrierKind write_barrier_kind() const {
+ return store_representation_.write_barrier_kind();
+ }
+ AtomicMemoryOrder order() const { return order_; }
+
+ StoreRepresentation store_representation() const {
+ return store_representation_;
+ }
+
+ private:
+ StoreRepresentation store_representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
+bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
+
+size_t hash_value(AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;
@@ -173,9 +241,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
Operator const* op) V8_WARN_UNUSED_RESULT;
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
- V8_WARN_UNUSED_RESULT;
-
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
class S128ImmediateParameter {
@@ -343,7 +408,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
const Operator* Comment(const char* msg);
- const Operator* AbortCSAAssert();
+ const Operator* AbortCSADcheck();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
@@ -852,7 +917,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
- const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
@@ -879,11 +943,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
- // Destroy value by masking when misspeculating.
- const Operator* TaggedPoisonOnSpeculation();
- const Operator* Word32PoisonOnSpeculation();
- const Operator* Word64PoisonOnSpeculation();
-
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@@ -901,13 +960,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* MemBarrier();
// atomic-load [base + index]
- const Operator* Word32AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(AtomicLoadParameters params);
// atomic-load [base + index]
- const Operator* Word64AtomicLoad(LoadRepresentation rep);
+ const Operator* Word64AtomicLoad(AtomicLoadParameters params);
// atomic-store [base + index], value
- const Operator* Word32AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(AtomicStoreParameters params);
// atomic-store [base + index], value
- const Operator* Word64AtomicStore(MachineRepresentation rep);
+ const Operator* Word64AtomicStore(AtomicStoreParameters params);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
@@ -937,9 +996,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
- const Operator* Word32AtomicPairLoad();
+ const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
// atomic-pair-store [base + index], value_high, value_low
- const Operator* Word32AtomicPairStore();
+ const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
// atomic-pair-sub [base + index], value_high, value_low
@@ -980,7 +1039,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
- V(Word, PoisonOnSpeculation) \
V(Int, Add) \
V(Int, Sub) \
V(Int, Mul) \
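
With the parameter classes introduced above, an atomic access carries its memory order next to its representation when the operator is requested. A usage sketch, assuming a MachineOperatorBuilder* named machine is in scope (e.g. inside a graph-building helper):

void BuildSeqCstAtomicOps(MachineOperatorBuilder* machine) {
  AtomicLoadParameters load_params(MachineType::Int32(),
                                   AtomicMemoryOrder::kSeqCst);
  const Operator* load = machine->Word32AtomicLoad(load_params);

  AtomicStoreParameters store_params(MachineRepresentation::kWord32,
                                     kNoWriteBarrier,
                                     AtomicMemoryOrder::kSeqCst);
  const Operator* store = machine->Word32AtomicStore(store_params);

  (void)load;   // a real caller would wire these operators into graph nodes
  (void)store;
}
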
diff --git a/chromium/v8/src/compiler/memory-lowering.cc b/chromium/v8/src/compiler/memory-lowering.cc
index 9673a51844e..27ad71c07a6 100644
--- a/chromium/v8/src/compiler/memory-lowering.cc
+++ b/chromium/v8/src/compiler/memory-lowering.cc
@@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
@@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
allocation_folding_(allocation_folding),
- poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
@@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
return Changed(node);
}
@@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
- DCHECK(node->opcode() == IrOpcode::kLoad ||
- node->opcode() == IrOpcode::kPoisonedLoad);
+ DCHECK(node->opcode() == IrOpcode::kLoad);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (type.IsMapWord()) {
- DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
@@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
return write_barrier_kind;
}
-bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
diff --git a/chromium/v8/src/compiler/memory-lowering.h b/chromium/v8/src/compiler/memory-lowering.h
index 1fbe18abff3..9edb880e6fd 100644
--- a/chromium/v8/src/compiler/memory-lowering.h
+++ b/chromium/v8/src/compiler/memory-lowering.h
@@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer {
MemoryLowering(
JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
@@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer {
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
void EnsureAllocateOperator();
Node* GetWasmInstanceNode();
@@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer {
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
AllocationFolding allocation_folding_;
- PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
diff --git a/chromium/v8/src/compiler/memory-optimizer.cc b/chromium/v8/src/compiler/memory-optimizer.cc
index 860ea1fae18..a92dd67c627 100644
--- a/chromium/v8/src/compiler/memory-optimizer.cc
+++ b/chromium/v8/src/compiler/memory-optimizer.cc
@@ -22,7 +22,7 @@ namespace {
bool CanAllocate(const Node* node) {
switch (node->opcode()) {
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
@@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
@@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
- case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicSub:
case IrOpcode::kWord32AtomicXor:
- case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicCompareExchange:
@@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicXor:
- case IrOpcode::kWord64PoisonOnSpeculation:
return false;
case IrOpcode::kCall:
@@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
- JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone),
- memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level,
- allocation_folding, WriteBarrierAssertFailed,
- function_debug_name),
+ memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
diff --git a/chromium/v8/src/compiler/memory-optimizer.h b/chromium/v8/src/compiler/memory-optimizer.h
index 3845304fdd6..7d8bca44d45 100644
--- a/chromium/v8/src/compiler/memory-optimizer.h
+++ b/chromium/v8/src/compiler/memory-optimizer.h
@@ -30,7 +30,6 @@ using NodeId = uint32_t;
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h
index 1ce40234243..86e48844211 100644
--- a/chromium/v8/src/compiler/node-matchers.h
+++ b/chromium/v8/src/compiler/node-matchers.h
@@ -16,6 +16,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/objects/heap-object.h"
@@ -743,7 +744,6 @@ struct BaseWithIndexAndDisplacementMatcher {
switch (from->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
@@ -817,6 +817,14 @@ struct V8_EXPORT_PRIVATE DiamondMatcher
Node* if_false_;
};
+struct LoadTransformMatcher
+ : ValueMatcher<LoadTransformParameters, IrOpcode::kLoadTransform> {
+ explicit LoadTransformMatcher(Node* node) : ValueMatcher(node) {}
+ bool Is(LoadTransformation t) {
+ return HasResolvedValue() && ResolvedValue().transformation == t;
+ }
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
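
The new LoadTransformMatcher follows the usual value-matcher shape: wrap a node, check that its parameters resolved, and offer an Is() convenience. A self-contained sketch of that shape, with stand-in Node and parameter types rather than the real compiler classes:

    #include <optional>

    // Stand-in types; not V8's actual classes.
    enum class LoadTransformation { kS128Load32Zero, kS128Load64Zero };
    struct LoadTransformParameters { LoadTransformation transformation; };
    struct Node {
      std::optional<LoadTransformParameters> params;  // set only for LoadTransform nodes
    };

    class LoadTransformMatcherSketch {
     public:
      explicit LoadTransformMatcherSketch(const Node* node) : node_(node) {}
      bool HasResolvedValue() const { return node_->params.has_value(); }
      LoadTransformParameters ResolvedValue() const { return *node_->params; }
      // Convenience predicate, mirroring LoadTransformMatcher::Is().
      bool Is(LoadTransformation t) const {
        return HasResolvedValue() && ResolvedValue().transformation == t;
      }
     private:
      const Node* node_;
    };

    int main() {
      Node load_transform{LoadTransformParameters{LoadTransformation::kS128Load64Zero}};
      LoadTransformMatcherSketch m(&load_transform);
      return m.Is(LoadTransformation::kS128Load64Zero) ? 0 : 1;
    }
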
diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h
index 912bd7b5cec..d3739f55b37 100644
--- a/chromium/v8/src/compiler/opcodes.h
+++ b/chromium/v8/src/compiler/opcodes.h
@@ -463,7 +463,6 @@
V(PlainPrimitiveToFloat64) \
V(PlainPrimitiveToNumber) \
V(PlainPrimitiveToWord32) \
- V(PoisonIndex) \
V(RestLength) \
V(RuntimeAbort) \
V(StoreDataViewElement) \
@@ -682,11 +681,10 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_ATOMIC_OP_LIST(V) \
- V(AbortCSAAssert) \
+ V(AbortCSADcheck) \
V(DebugBreak) \
V(Comment) \
V(Load) \
- V(PoisonedLoad) \
V(LoadImmutable) \
V(Store) \
V(StackSlot) \
@@ -746,9 +744,6 @@
V(Word64Select) \
V(Float32Select) \
V(Float64Select) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
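
The lists edited above are X-macros: each entry is a V(...) invocation that callers expand into enum values, switch cases, and so on, which is why renaming AbortCSAAssert to AbortCSADcheck or dropping the poison entries is a one-line change per list. A generic, compilable illustration of the pattern (the opcode names are a subset taken from the list above; everything else is illustrative):

    #include <cstdio>

    #define DEMO_OP_LIST(V) \
      V(AbortCSADcheck)     \
      V(DebugBreak)         \
      V(Load)               \
      V(Store)

    enum class DemoOpcode {
    #define DECLARE(Name) k##Name,
      DEMO_OP_LIST(DECLARE)
    #undef DECLARE
    };

    const char* DemoOpcodeName(DemoOpcode op) {
      switch (op) {
    #define CASE(Name)          \
      case DemoOpcode::k##Name: \
        return #Name;
        DEMO_OP_LIST(CASE)
    #undef CASE
      }
      return "<unknown>";
    }

    int main() { std::printf("%s\n", DemoOpcodeName(DemoOpcode::kLoad)); }
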
diff --git a/chromium/v8/src/compiler/pipeline-statistics.cc b/chromium/v8/src/compiler/pipeline-statistics.cc
index 82a6e6bb3ec..16366bf5889 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.cc
+++ b/chromium/v8/src/compiler/pipeline-statistics.cc
@@ -10,21 +10,12 @@
#include "src/compiler/zone-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm.turbofan categories.
-constexpr const char kTraceCategory[] = // --
- TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
-
-} // namespace
+constexpr char PipelineStatistics::kTraceCategory[];
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
@@ -62,6 +53,7 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
: outer_zone_(info->zone()),
zone_stats_(zone_stats),
compilation_stats_(compilation_stats),
+ code_kind_(info->code_kind()),
phase_kind_name_(nullptr),
phase_name_(nullptr) {
if (info->has_shared_info()) {
@@ -70,7 +62,6 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
total_stats_.Begin(this);
}
-
PipelineStatistics::~PipelineStatistics() {
if (InPhaseKind()) EndPhaseKind();
CompilationStatistics::BasicStats diff;
@@ -82,7 +73,8 @@ PipelineStatistics::~PipelineStatistics() {
void PipelineStatistics::BeginPhaseKind(const char* phase_kind_name) {
DCHECK(!InPhase());
if (InPhaseKind()) EndPhaseKind();
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_kind_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_kind_name, "kind",
+ CodeKindToString(code_kind_));
phase_kind_name_ = phase_kind_name;
phase_kind_stats_.Begin(this);
}
@@ -92,11 +84,14 @@ void PipelineStatistics::EndPhaseKind() {
CompilationStatistics::BasicStats diff;
phase_kind_stats_.End(this, &diff);
compilation_stats_->RecordPhaseKindStats(phase_kind_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_kind_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_kind_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
void PipelineStatistics::BeginPhase(const char* phase_name) {
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_name, "kind",
+ CodeKindToString(code_kind_));
DCHECK(InPhaseKind());
phase_name_ = phase_name;
phase_stats_.Begin(this);
@@ -107,7 +102,9 @@ void PipelineStatistics::EndPhase() {
CompilationStatistics::BasicStats diff;
phase_stats_.End(this, &diff);
compilation_stats_->RecordPhaseStats(phase_kind_name_, phase_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
} // namespace compiler
diff --git a/chromium/v8/src/compiler/pipeline-statistics.h b/chromium/v8/src/compiler/pipeline-statistics.h
index 8a05d98011d..19f7574e2a5 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.h
+++ b/chromium/v8/src/compiler/pipeline-statistics.h
@@ -11,6 +11,8 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/compilation-statistics.h"
+#include "src/objects/code-kind.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -29,6 +31,12 @@ class PipelineStatistics : public Malloced {
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
+ // We log detailed phase information about the pipeline
+ // in both the v8.turbofan and the v8.wasm.turbofan categories.
+ static constexpr char kTraceCategory[] =
+ TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
+
private:
size_t OuterZoneSize() {
return static_cast<size_t>(outer_zone_->allocation_size());
@@ -60,6 +68,7 @@ class PipelineStatistics : public Malloced {
Zone* outer_zone_;
ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
+ CodeKind code_kind_;
std::string function_name_;
// Stats for the entire compilation.
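
The trace category string moves into the header as a static constexpr member so that other translation units (pipeline.cc below) can name it in TRACE_EVENT macros, while the .cc file keeps an out-of-line definition, which pre-C++17 builds still require for an ODR-used constexpr member. A minimal stand-alone sketch of the same pattern (the names and the category string are illustrative):

    #include <cstdio>

    struct StatsSketch {
      // Exposed in the header so any includer can pass it to tracing macros.
      static constexpr char kTraceCategory[] = "disabled-by-default-demo.category";
    };

    // In exactly one .cc file; required before C++17, redundant but harmless after.
    constexpr char StatsSketch::kTraceCategory[];

    int main() { std::printf("%s\n", StatsSketch::kTraceCategory); }
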
diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc
index e802cd72682..d4e47f7361e 100644
--- a/chromium/v8/src/compiler/pipeline.cc
+++ b/chromium/v8/src/compiler/pipeline.cc
@@ -84,6 +84,7 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
+#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
@@ -95,6 +96,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/wasm-inlining.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -547,8 +549,7 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- info()->GetPoisoningMitigationLevel(), assembler_options(),
- info_->builtin(), max_unoptimized_frame_height(),
+ assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
max_pushed_argument_count(),
FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
}
@@ -947,13 +948,10 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
-#ifdef V8_RUNTIME_CALL_STATS
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify",
- RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
- RuntimeCallStats::kThreadSpecific);
-#else
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify");
-#endif
+ RCS_SCOPE(data->runtime_call_stats(),
+ RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
+ RuntimeCallStats::kThreadSpecific);
+ TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
if (info->trace_turbo_json()) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
@@ -1161,18 +1159,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_inlining) {
compilation_info()->set_inlining();
}
-
- // This is the bottleneck for computing and setting poisoning level in the
- // optimizing compiler.
- PoisoningMitigationLevel load_poisoning =
- PoisoningMitigationLevel::kDontPoison;
- if (FLAG_untrusted_code_mitigations) {
- // For full mitigations, this can be changed to
- // PoisoningMitigationLevel::kPoisonAll.
- load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
- }
- compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
-
if (FLAG_turbo_allocation_folding) {
compilation_info()->set_allocation_folding();
}
@@ -1424,8 +1410,8 @@ struct InliningPhase {
};
#if V8_ENABLE_WEBASSEMBLY
-struct WasmInliningPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+struct JSWasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->has_js_wasm_calls());
@@ -1629,10 +1615,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(
- data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
- data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage, data->observe_node_manager());
+ SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
+ data->source_positions(), data->node_origins(),
+ &data->info()->tick_counter(), linkage,
+ data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1699,6 +1685,28 @@ struct WasmLoopUnrollingPhase {
}
}
};
+
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+
+ void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes) {
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ DeadCodeElimination dead(&graph_reducer, data->graph(),
+ data->mcgraph()->common(), temp_zone);
+ // For now, hard-code inlining the function at index 0.
+ InlineByIndex heuristics({0});
+ WasmInliner inliner(&graph_reducer, env, data->source_positions(),
+ data->node_origins(), data->mcgraph(), wire_bytes,
+ &heuristics);
+ AddReducer(data, &graph_reducer, &dead);
+ AddReducer(data, &graph_reducer, &inliner);
+
+ graph_reducer.ReduceGraph();
+ }
+};
#endif // V8_ENABLE_WEBASSEMBLY
struct LoopExitEliminationPhase {
@@ -1797,7 +1805,6 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
}
{
@@ -1846,9 +1853,9 @@ struct LoadEliminationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone,
- BranchElimination::kEARLY);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions(),
+ BranchElimination::kEARLY);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1899,7 +1906,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
- data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
+ data->jsgraph(), temp_zone,
data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
@@ -1915,8 +1922,8 @@ struct LateOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
@@ -1989,7 +1996,6 @@ struct ScheduledEffectControlLinearizationPhase {
// - lower simplified memory and select nodes to machine level nodes.
LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
@@ -2045,7 +2051,7 @@ struct WasmOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2100,7 +2106,7 @@ struct CsaEarlyOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2118,8 +2124,8 @@ struct CsaOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
@@ -2205,7 +2211,6 @@ struct InstructionSelectionPhase {
data->assembler_options().enable_root_relative_access
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
- data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
@@ -2607,6 +2612,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
tracing_scope.stream(), isolate);
}
#endif
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code),
+ compilation_info()->GetDebugName().get()));
return SUCCEEDED;
}
return FAILED;
@@ -2750,8 +2758,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2853,8 +2861,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2969,17 +2977,12 @@ int HashGraphForPGO(Graph* graph) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data) {
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
kind);
info.set_builtin(builtin);
- if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
- info.SetPoisoningMitigationLevel(poisoning_level);
- }
-
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
@@ -3097,7 +3100,7 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name, const AssemblerOptions& options,
+ const char* debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions) {
Graph* graph = mcgraph->graph();
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
@@ -3160,6 +3163,9 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result.result_tier = wasm::ExecutionTier::kTurbofan;
+ if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ }
DCHECK(result.succeeded());
@@ -3195,7 +3201,8 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
// static
void Pipeline::GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -3225,6 +3232,10 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
}
+ if (FLAG_wasm_inlining) {
+ pipeline.Run<WasmInliningPhase>(env, wire_bytes_storage);
+ pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
@@ -3546,18 +3557,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
} else {
- const RegisterConfiguration* config;
- if (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison) {
-#ifdef V8_TARGET_ARCH_IA32
- FATAL("Poisoning is not supported on ia32.");
-#else
- config = RegisterConfiguration::Poisoning();
-#endif // V8_TARGET_ARCH_IA32
- } else {
- config = RegisterConfiguration::Default();
- }
-
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
@@ -3643,7 +3643,6 @@ std::ostream& operator<<(std::ostream& out,
out << "\"codeStartRegisterCheck\": "
<< s.offsets_info->code_start_register_check << ", ";
out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
- out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
diff --git a/chromium/v8/src/compiler/pipeline.h b/chromium/v8/src/compiler/pipeline.h
index ea67b31e06c..2a166b2073e 100644
--- a/chromium/v8/src/compiler/pipeline.h
+++ b/chromium/v8/src/compiler/pipeline.h
@@ -23,11 +23,13 @@ class ProfileDataFromFile;
class RegisterConfiguration;
namespace wasm {
+struct CompilationEnv;
struct FunctionBody;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
struct WasmModule;
+class WireBytesStorage;
} // namespace wasm
namespace compiler {
@@ -54,7 +56,8 @@ class Pipeline : public AllStatic {
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -63,8 +66,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code.
static wasm::WasmCompilationResult GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
// Returns a new compilation job for a wasm heap stub.
@@ -78,8 +80,7 @@ class Pipeline : public AllStatic {
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
// ---------------------------------------------------------------------------
diff --git a/chromium/v8/src/compiler/property-access-builder.cc b/chromium/v8/src/compiler/property-access-builder.cc
index a64521d6f66..456512a8671 100644
--- a/chromium/v8/src/compiler/property-access-builder.cc
+++ b/chromium/v8/src/compiler/property-access-builder.cc
@@ -168,7 +168,9 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
Map::GetConstructorFunction(
*map_handle, *broker()->target_native_context().object())
.value();
- map = MakeRef(broker(), constructor.initial_map());
+ // {constructor.initial_map()} is loaded/stored with acquire-release
+ // semantics for constructors.
+ map = MakeRefAssumeMemoryFence(broker(), constructor.initial_map());
DCHECK(map.object()->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
@@ -235,7 +237,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::Any(),
MachineType::AnyTagged(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -263,7 +264,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -291,7 +291,6 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
ConstFieldInfo::None()};
return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
@@ -319,7 +318,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
access_info.GetConstFieldInfo()};
if (field_representation == MachineRepresentation::kTaggedPointer ||
field_representation == MachineRepresentation::kCompressedPointer) {
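
The comment added above relies on acquire-release pairing: a release store publishes prior writes, and a thread that observes the stored value through an acquire load is guaranteed to see them, which is what makes MakeRefAssumeMemoryFence safe here. A generic, runnable illustration of that guarantee (names are illustrative; V8's actual accessors differ):

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct Map { int field = 0; };

    std::atomic<Map*> initial_map{nullptr};

    void Producer(Map* m) {
      m->field = 42;                                    // plain write...
      initial_map.store(m, std::memory_order_release);  // ...published by the release store
    }

    void Consumer() {
      Map* m;
      while ((m = initial_map.load(std::memory_order_acquire)) == nullptr) {
      }
      assert(m->field == 42);  // visible once the acquire load observed the pointer
    }

    int main() {
      Map map;
      std::thread t1(Producer, &map), t2(Consumer);
      t1.join();
      t2.join();
    }
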
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.cc b/chromium/v8/src/compiler/raw-machine-assembler.cc
index 7ed217d4e36..2a2eb07fe17 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.cc
+++ b/chromium/v8/src/compiler/raw-machine-assembler.cc
@@ -18,8 +18,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word, MachineOperatorBuilder::Flags flags,
- MachineOperatorBuilder::AlignmentRequirements alignment_requirements,
- PoisoningMitigationLevel poisoning_level)
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
: isolate_(isolate),
graph_(graph),
schedule_(zone()->New<Schedule>(zone())),
@@ -30,8 +29,7 @@ RawMachineAssembler::RawMachineAssembler(
call_descriptor_(call_descriptor),
target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
- current_block_(schedule()->start()),
- poisoning_level_(poisoning_level) {
+ current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
@@ -192,12 +190,12 @@ void RawMachineAssembler::OptimizeControlFlow(Schedule* schedule, Graph* graph,
false_block->ClearPredecessors();
size_t arity = block->PredecessorCount();
- for (size_t i = 0; i < arity; ++i) {
- BasicBlock* predecessor = block->PredecessorAt(i);
+ for (size_t j = 0; j < arity; ++j) {
+ BasicBlock* predecessor = block->PredecessorAt(j);
predecessor->ClearSuccessors();
if (block->deferred()) predecessor->set_deferred(true);
Node* branch_clone = graph->CloneNode(branch);
- int phi_input = static_cast<int>(i);
+ int phi_input = static_cast<int>(j);
NodeProperties::ReplaceValueInput(
branch_clone, NodeProperties::GetValueInput(phi, phi_input), 0);
BasicBlock* new_true_block = schedule->NewBasicBlock();
@@ -472,7 +470,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
return;
case IrOpcode::kIfTrue: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kTrue) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -485,7 +483,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
case IrOpcode::kIfFalse: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kFalse) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -516,11 +514,10 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
}
- BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
- if (info.hint == new_branch_hint) return;
- NodeProperties::ChangeOp(
- responsible_branch,
- common()->Branch(new_branch_hint, info.is_safety_check));
+ BranchHint hint = BranchHintOf(responsible_branch->op());
+ if (hint == new_branch_hint) return;
+ NodeProperties::ChangeOp(responsible_branch,
+ common()->Branch(new_branch_hint));
}
Node* RawMachineAssembler::TargetParameter() {
@@ -544,9 +541,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = MakeNode(
- common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
- &condition);
+ Node* branch = MakeNode(common()->Branch(BranchHint::kNone), 1, &condition);
BasicBlock* true_block = schedule()->NewBasicBlock();
BasicBlock* false_block = schedule()->NewBasicBlock();
schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
@@ -576,14 +571,14 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
size_t succ_count = case_count + 1;
Node* switch_node = MakeNode(common()->Switch(succ_count), 1, &index);
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
- for (size_t index = 0; index < case_count; ++index) {
- int32_t case_value = case_values[index];
+ for (size_t i = 0; i < case_count; ++i) {
+ int32_t case_value = case_values[i];
BasicBlock* case_block = schedule()->NewBasicBlock();
Node* case_node =
graph()->NewNode(common()->IfValue(case_value), switch_node);
schedule()->AddNode(case_block, case_node);
- schedule()->AddGoto(case_block, Use(case_labels[index]));
- succ_blocks[index] = case_block;
+ schedule()->AddGoto(case_block, Use(case_labels[i]));
+ succ_blocks[i] = case_block;
}
BasicBlock* default_block = schedule()->NewBasicBlock();
Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
@@ -678,8 +673,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::AbortCSAAssert(Node* message) {
- AddNode(machine()->AbortCSAAssert(), message);
+void RawMachineAssembler::AbortCSADcheck(Node* message) {
+ AddNode(machine()->AbortCSADcheck(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.h b/chromium/v8/src/compiler/raw-machine-assembler.h
index a811fa7bf9c..23051dfbba6 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.h
+++ b/chromium/v8/src/compiler/raw-machine-assembler.h
@@ -52,9 +52,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport(),
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kPoisonCriticalOnly);
+ FullUnalignedAccessSupport());
~RawMachineAssembler() = default;
RawMachineAssembler(const RawMachineAssembler&) = delete;
@@ -67,7 +65,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
// Only used for tests: Finalizes the schedule and exports it to be used for
// code generation. Note that this RawMachineAssembler becomes invalid after
@@ -132,19 +129,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return Load(type, base, IntPtrConstant(0), needs_poisoning);
+ Node* Load(MachineType type, Node* base) {
+ return Load(type, base, IntPtrConstant(0));
}
- Node* Load(MachineType type, Node* base, Node* index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ Node* Load(MachineType type, Node* base, Node* index) {
const Operator* op = machine()->Load(type);
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
- if (needs_poisoning == LoadSensitivity::kCritical &&
- poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
- op = machine()->PoisonedLoad(type);
- }
-
Node* load = AddNode(op, base, index);
return load;
}
@@ -174,10 +163,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
bool IsMapOffsetConstantMinusTag(int offset) {
return offset == HeapObject::kMapOffset - kHeapObjectTag;
}
- Node* LoadFromObject(
- MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
@@ -253,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Atomic memory operations.
- Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicLoad(type), base, index);
+ Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
+ DCHECK_NE(rep.representation().representation(),
+ MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicLoad(rep), base, index);
}
- Node* AtomicLoad64(Node* base, Node* index) {
+ Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
if (machine()->Is64()) {
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
- index);
+ return AddNode(machine()->Word64AtomicLoad(rep), base, index);
} else {
- return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+ return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
}
}
@@ -276,22 +262,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#define VALUE_HALVES value, value_high
#endif
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
- DCHECK_NE(rep, MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicStore(params), base, index, value);
}
- Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
+ Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
+ Node* value, Node* value_high) {
if (machine()->Is64()) {
DCHECK_NULL(value_high);
- return AddNode(
- machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
- index, value);
+ return AddNode(machine()->Word64AtomicStore(params), base, index, value);
} else {
- return AddNode(machine()->Word32AtomicPairStore(), base, index,
- VALUE_HALVES);
+ DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
+ params.representation() != MachineRepresentation::kTaggedSigned &&
+ params.representation() != MachineRepresentation::kTagged);
+ return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
+ index, VALUE_HALVES);
}
}
@@ -959,20 +947,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- Node* TaggedPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->TaggedPoisonOnSpeculation(), value);
- }
- return value;
- }
-
- Node* WordPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->WordPoisonOnSpeculation(), value);
- }
- return value;
- }
-
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
Node* CallN(CallDescriptor* call_descriptor, int input_count,
@@ -1059,7 +1033,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
@@ -1136,6 +1110,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common);
Isolate* isolate_;
+
Graph* graph_;
Schedule* schedule_;
SourcePositionTable* source_positions_;
@@ -1146,7 +1121,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* target_parameter_;
NodeVector parameters_;
BasicBlock* current_block_;
- PoisoningMitigationLevel poisoning_level_;
};
class V8_EXPORT_PRIVATE RawMachineLabel final {
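
The atomic helpers above now take parameter objects (AtomicLoadParameters, AtomicStoreParameters) instead of a bare MachineType or MachineRepresentation, so the memory order travels together with the representation and can be forwarded to the pair operations on 32-bit targets. A minimal sketch of that bundling, with stand-in enums (V8's parameter classes carry more than this):

    #include <cstdio>

    // Stand-in enums; not V8's actual definitions.
    enum class MachineRepresentation { kWord32, kWord64 };
    enum class AtomicMemoryOrder { kAcqRel, kSeqCst };

    class AtomicLoadParametersSketch {
     public:
      AtomicLoadParametersSketch(MachineRepresentation rep, AtomicMemoryOrder order)
          : rep_(rep), order_(order) {}
      MachineRepresentation representation() const { return rep_; }
      AtomicMemoryOrder order() const { return order_; }
     private:
      MachineRepresentation rep_;
      AtomicMemoryOrder order_;
    };

    int main() {
      // One value describes a 32-bit, sequentially consistent atomic load and can be
      // threaded through builder methods such as AtomicLoad(params, base, index).
      AtomicLoadParametersSketch params(MachineRepresentation::kWord32,
                                        AtomicMemoryOrder::kSeqCst);
      std::printf("%d\n", static_cast<int>(params.order()));
    }
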
diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc
index 07a716bfa7a..a54caf2abe1 100644
--- a/chromium/v8/src/compiler/scheduler.cc
+++ b/chromium/v8/src/compiler/scheduler.cc
@@ -967,8 +967,9 @@ class SpecialRPONumberer : public ZoneObject {
if (HasLoopNumber(current)) {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
- BasicBlock* end = current_loop->end;
- current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
+ BasicBlock* loop_end = current_loop->end;
+ current->set_loop_end(loop_end == nullptr ? BeyondEndSentinel()
+ : loop_end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -1025,8 +1026,8 @@ class SpecialRPONumberer : public ZoneObject {
// loop header H are members of the loop too. O(|blocks between M and H|).
while (queue_length > 0) {
BasicBlock* block = (*queue)[--queue_length].block;
- for (size_t i = 0; i < block->PredecessorCount(); i++) {
- BasicBlock* pred = block->PredecessorAt(i);
+ for (size_t j = 0; j < block->PredecessorCount(); j++) {
+ BasicBlock* pred = block->PredecessorAt(j);
if (pred != header) {
if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
loops_[loop_num].members->Add(pred->id().ToInt());
@@ -1124,7 +1125,7 @@ class SpecialRPONumberer : public ZoneObject {
// Check the contiguousness of loops.
int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
- BasicBlock* block = order->at(j);
+ block = order->at(j);
DCHECK_EQ(block->rpo_number(), j);
if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!header->LoopContains(block));
@@ -1440,9 +1441,9 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
- Node* const node = queue->front();
+ Node* const n = queue->front();
queue->pop();
- VisitNode(node);
+ VisitNode(n);
} while (!queue->empty());
}
}
@@ -1821,8 +1822,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// temporary solution and should be merged into the rest of the scheduler as
// soon as the approach settled for all floating loops.
NodeVector propagation_roots(control_flow_builder_->control_);
- for (Node* node : control_flow_builder_->control_) {
- for (Node* use : node->uses()) {
+ for (Node* control : control_flow_builder_->control_) {
+ for (Node* use : control->uses()) {
if (NodeProperties::IsPhi(use) && IsLive(use)) {
propagation_roots.push_back(use);
}
@@ -1830,8 +1831,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
}
if (FLAG_trace_turbo_scheduler) {
TRACE("propagation roots: ");
- for (Node* node : propagation_roots) {
- TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
+ for (Node* r : propagation_roots) {
+ TRACE("#%d:%s ", r->id(), r->op()->mnemonic());
}
TRACE("\n");
}
diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc
index 1c07a23dded..6416eed376d 100644
--- a/chromium/v8/src/compiler/simplified-lowering.cc
+++ b/chromium/v8/src/compiler/simplified-lowering.cc
@@ -1735,11 +1735,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- if (lowering->poisoning_level_ ==
- PoisoningMitigationLevel::kDontPoison &&
- (index_type.IsNone() || length_type.IsNone() ||
- (index_type.Min() >= 0.0 &&
- index_type.Max() < length_type.Min()))) {
+ if (index_type.IsNone() || length_type.IsNone() ||
+ (index_type.Min() >= 0.0 &&
+ index_type.Max() < length_type.Min())) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
// TODO(neis): Move this into TypedOptimization?
@@ -3181,11 +3179,6 @@ class RepresentationSelector {
}
case IrOpcode::kCheckBounds:
return VisitCheckBounds<T>(node, lowering);
- case IrOpcode::kPoisonIndex: {
- VisitUnop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- return;
- }
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@@ -3835,7 +3828,7 @@ class RepresentationSelector {
case IrOpcode::kDateNow:
VisitInputs<T>(node);
- return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kFrameState:
return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
@@ -4225,18 +4218,19 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(
- JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
- Linkage* linkage, ObserveNodeManager* observe_node_manager)
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter,
+ Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
linkage_(linkage),
observe_node_manager_(observe_node_manager) {}
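
With the poisoning term dropped, the CheckBounds hunk above keeps only the type-based test for a redundant bounds check. A self-contained restatement of that predicate over [min, max] ranges, with IsNone() modelled as an empty range (the struct here is a stand-in for V8's Type):

    #include <cassert>

    struct RangeSketch {
      bool is_none;     // no possible values (dead input)
      double min, max;  // inclusive bounds otherwise
    };

    // The bounds check is redundant if either input is impossible, or the index is
    // already known to lie in [0, length).
    bool BoundsCheckIsRedundant(const RangeSketch& index, const RangeSketch& length) {
      return index.is_none || length.is_none ||
             (index.min >= 0.0 && index.max < length.min);
    }

    int main() {
      RangeSketch index{false, 0.0, 7.0};
      RangeSketch length{false, 8.0, 1024.0};
      assert(BoundsCheckIsRedundant(index, length));    // index always < smallest length
      RangeSketch maybe_negative{false, -1.0, 7.0};
      assert(!BoundsCheckIsRedundant(maybe_negative, length));
    }
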
diff --git a/chromium/v8/src/compiler/simplified-lowering.h b/chromium/v8/src/compiler/simplified-lowering.h
index 54017b34f7a..f60bc1a7e3e 100644
--- a/chromium/v8/src/compiler/simplified-lowering.h
+++ b/chromium/v8/src/compiler/simplified-lowering.h
@@ -31,7 +31,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
TickCounter* tick_counter, Linkage* linkage,
ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
@@ -83,8 +82,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- PoisoningMitigationLevel poisoning_level_;
-
TickCounter* const tick_counter_;
Linkage* const linkage_;
diff --git a/chromium/v8/src/compiler/simplified-operator-reducer.cc b/chromium/v8/src/compiler/simplified-operator-reducer.cc
index ea9e9f4ba5d..33edd66b4ff 100644
--- a/chromium/v8/src/compiler/simplified-operator-reducer.cc
+++ b/chromium/v8/src/compiler/simplified-operator-reducer.cc
@@ -77,7 +77,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
- if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
+ if (m.IsChangeTaggedSignedToInt32()) {
return Replace(m.InputAt(0));
}
break;
diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc
index 9c4f8f083ac..9461194b559 100644
--- a/chromium/v8/src/compiler/simplified-operator.cc
+++ b/chromium/v8/src/compiler/simplified-operator.cc
@@ -73,22 +73,6 @@ size_t hash_value(FieldAccess const& access) {
access.is_store_in_literal);
}
-size_t hash_value(LoadSensitivity load_sensitivity) {
- return static_cast<size_t>(load_sensitivity);
-}
-
-std::ostream& operator<<(std::ostream& os, LoadSensitivity load_sensitivity) {
- switch (load_sensitivity) {
- case LoadSensitivity::kCritical:
- return os << "Critical";
- case LoadSensitivity::kSafe:
- return os << "Safe";
- case LoadSensitivity::kUnsafe:
- return os << "Unsafe";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
#ifdef OBJECT_PRINT
@@ -107,9 +91,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
if (access.is_store_in_literal) {
os << " (store in literal)";
}
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
os << "]";
return os;
}
@@ -145,9 +126,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
return os;
}
@@ -719,129 +697,128 @@ bool operator==(CheckMinusZeroParameters const& lhs,
return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1, 0) \
- V(NumberEqual, Operator::kCommutative, 2, 0) \
- V(NumberLessThan, Operator::kNoProperties, 2, 0) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(NumberAdd, Operator::kCommutative, 2, 0) \
- V(NumberSubtract, Operator::kNoProperties, 2, 0) \
- V(NumberMultiply, Operator::kCommutative, 2, 0) \
- V(NumberDivide, Operator::kNoProperties, 2, 0) \
- V(NumberModulus, Operator::kNoProperties, 2, 0) \
- V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
- V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
- V(NumberImul, Operator::kCommutative, 2, 0) \
- V(NumberAbs, Operator::kNoProperties, 1, 0) \
- V(NumberClz32, Operator::kNoProperties, 1, 0) \
- V(NumberCeil, Operator::kNoProperties, 1, 0) \
- V(NumberFloor, Operator::kNoProperties, 1, 0) \
- V(NumberFround, Operator::kNoProperties, 1, 0) \
- V(NumberAcos, Operator::kNoProperties, 1, 0) \
- V(NumberAcosh, Operator::kNoProperties, 1, 0) \
- V(NumberAsin, Operator::kNoProperties, 1, 0) \
- V(NumberAsinh, Operator::kNoProperties, 1, 0) \
- V(NumberAtan, Operator::kNoProperties, 1, 0) \
- V(NumberAtan2, Operator::kNoProperties, 2, 0) \
- V(NumberAtanh, Operator::kNoProperties, 1, 0) \
- V(NumberCbrt, Operator::kNoProperties, 1, 0) \
- V(NumberCos, Operator::kNoProperties, 1, 0) \
- V(NumberCosh, Operator::kNoProperties, 1, 0) \
- V(NumberExp, Operator::kNoProperties, 1, 0) \
- V(NumberExpm1, Operator::kNoProperties, 1, 0) \
- V(NumberLog, Operator::kNoProperties, 1, 0) \
- V(NumberLog1p, Operator::kNoProperties, 1, 0) \
- V(NumberLog10, Operator::kNoProperties, 1, 0) \
- V(NumberLog2, Operator::kNoProperties, 1, 0) \
- V(NumberMax, Operator::kNoProperties, 2, 0) \
- V(NumberMin, Operator::kNoProperties, 2, 0) \
- V(NumberPow, Operator::kNoProperties, 2, 0) \
- V(NumberRound, Operator::kNoProperties, 1, 0) \
- V(NumberSign, Operator::kNoProperties, 1, 0) \
- V(NumberSin, Operator::kNoProperties, 1, 0) \
- V(NumberSinh, Operator::kNoProperties, 1, 0) \
- V(NumberSqrt, Operator::kNoProperties, 1, 0) \
- V(NumberTan, Operator::kNoProperties, 1, 0) \
- V(NumberTanh, Operator::kNoProperties, 1, 0) \
- V(NumberTrunc, Operator::kNoProperties, 1, 0) \
- V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
- V(NumberToInt32, Operator::kNoProperties, 1, 0) \
- V(NumberToString, Operator::kNoProperties, 1, 0) \
- V(NumberToUint32, Operator::kNoProperties, 1, 0) \
- V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
- V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
- V(BigIntNegate, Operator::kNoProperties, 1, 0) \
- V(StringConcat, Operator::kNoProperties, 3, 0) \
- V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
- V(StringIndexOf, Operator::kNoProperties, 3, 0) \
- V(StringLength, Operator::kNoProperties, 1, 0) \
- V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
- V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
- V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
- V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
- V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
- V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
- V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
- V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
- V(ObjectIsString, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
- V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
- V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
- V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
- V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
- V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
- V(SameValue, Operator::kCommutative, 2, 0) \
- V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
- V(NumberSameValue, Operator::kCommutative, 2, 0) \
- V(ReferenceEqual, Operator::kCommutative, 2, 0) \
- V(StringEqual, Operator::kCommutative, 2, 0) \
- V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(ToBoolean, Operator::kNoProperties, 1, 0) \
- V(NewConsString, Operator::kNoProperties, 3, 0) \
- V(PoisonIndex, Operator::kNoProperties, 1, 0)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
+ V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
+ V(NumberSameValue, Operator::kCommutative, 2, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
V(BigIntAdd, Operator::kNoProperties, 2, 1) \
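
[Editor's note] The PURE_OP_LIST rewrite above (dropping PoisonIndex and re-indenting the continuation backslashes) follows V8's usual X-macro convention: each entry names a simplified operator together with its Operator properties and two input counts, and the whole list is expanded several times with different definitions of V. A minimal standalone sketch of the idiom, with invented operator names and counts rather than V8's:

#include <cstdio>

// Hypothetical operator list in the same V(...) style as PURE_OP_LIST:
// V(Name, value_input_count)
#define DEMO_PURE_OP_LIST(V) \
  V(BooleanNot, 1)           \
  V(NumberAdd, 2)            \
  V(NumberEqual, 2)

// First expansion: an enum of opcodes.
enum class DemoOpcode {
#define DECLARE_OPCODE(Name, input_count) k##Name,
  DEMO_PURE_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// Second expansion: one accessor per operator reporting its input count.
#define DECLARE_INPUT_COUNT(Name, input_count) \
  constexpr int Name##InputCount() { return input_count; }
DEMO_PURE_OP_LIST(DECLARE_INPUT_COUNT)
#undef DECLARE_INPUT_COUNT

int main() {
  std::printf("NumberAdd value inputs: %d\n", NumberAddInputCount());
}
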
diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h
index d7a59014486..0602b795a93 100644
--- a/chromium/v8/src/compiler/simplified-operator.h
+++ b/chromium/v8/src/compiler/simplified-operator.h
@@ -46,10 +46,6 @@ size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
-size_t hash_value(LoadSensitivity);
-
-std::ostream& operator<<(std::ostream&, LoadSensitivity);
-
struct ConstFieldInfo {
// the map that introduced the const field, if any. An access is considered
// mutable iff the handle is null.
@@ -82,7 +78,6 @@ struct FieldAccess {
Type type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
@@ -96,14 +91,12 @@ struct FieldAccess {
type(Type::None()),
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe),
const_field_info(ConstFieldInfo::None()),
is_store_in_literal(false) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
bool is_store_in_literal = false
#ifdef V8_HEAP_SANDBOX
@@ -118,7 +111,6 @@ struct FieldAccess {
type(type),
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
is_store_in_literal(is_store_in_literal)
#ifdef V8_HEAP_SANDBOX
@@ -162,25 +154,21 @@ struct ElementAccess {
Type type; // type of the element.
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ElementAccess()
: base_is_tagged(kTaggedBase),
header_size(0),
type(Type::None()),
machine_type(MachineType::None()),
- write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe) {}
+ write_barrier_kind(kFullWriteBarrier) {}
ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type type,
- MachineType machine_type, WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe)
+ MachineType machine_type, WriteBarrierKind write_barrier_kind)
: base_is_tagged(base_is_tagged),
header_size(header_size),
type(type),
machine_type(machine_type),
- write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity) {}
+ write_barrier_kind(write_barrier_kind) {}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -926,7 +914,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* PoisonIndex();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
diff --git a/chromium/v8/src/compiler/typed-optimization.cc b/chromium/v8/src/compiler/typed-optimization.cc
index ce9b6fdb184..5025233c882 100644
--- a/chromium/v8/src/compiler/typed-optimization.cc
+++ b/chromium/v8/src/compiler/typed-optimization.cc
@@ -814,9 +814,9 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
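
[Editor's note] The typed-optimization hunk above swaps the ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING macro for explicit handling of the base::Optional<double> returned by StringRef::ToNumber(): if the numeric value is unavailable, the reducer bails out with NoChange(); otherwise it replaces the node with a constant. A tiny standalone sketch of that control flow, using std::optional and invented names in place of the V8 types:

#include <cstdio>
#include <optional>
#include <string>

enum class Reduction { kNoChange, kReplaced };

// Stand-in for StringRef::ToNumber(): may fail, e.g. when the heap data
// backing the string is not available to the compiler.
std::optional<double> DemoToNumber(const std::string& s) {
  try {
    return std::stod(s);
  } catch (...) {
    return std::nullopt;
  }
}

Reduction DemoReduceToNumberInput(const std::string& input) {
  std::optional<double> number = DemoToNumber(input);
  if (!number.has_value()) return Reduction::kNoChange;  // bail out, keep the graph as-is
  std::printf("replace with constant %f\n", number.value());
  return Reduction::kReplaced;
}

int main() {
  DemoReduceToNumberInput("1.25");          // kReplaced
  DemoReduceToNumberInput("not-a-number");  // kNoChange
}
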
diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc
index 529f1cc7bba..a96d1ea981b 100644
--- a/chromium/v8/src/compiler/typer.cc
+++ b/chromium/v8/src/compiler/typer.cc
@@ -882,9 +882,10 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
InductionVariable* induction_var) {
Node* node = induction_var->phi();
DCHECK_EQ(node->opcode(), IrOpcode::kInductionVariablePhi);
+ Node* arith = node->InputAt(1);
Type type = NodeProperties::GetType(node);
Type initial_type = Operand(node, 0);
- Node* arith = node->InputAt(1);
+ Type arith_type = Operand(node, 1);
Type increment_type = Operand(node, 2);
// Intersect {type} with useful bounds.
@@ -910,26 +911,30 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
type = Type::Intersect(type, bound_type, typer_->zone());
}
- // Apply ordinary typing to the "increment" operation.
- // clang-format off
- switch (arith->opcode()) {
+ if (arith_type.IsNone()) {
+ type = Type::None();
+ } else {
+ // Apply ordinary typing to the "increment" operation.
+ // clang-format off
+ switch (arith->opcode()) {
#define CASE(x) \
- case IrOpcode::k##x: \
- type = Type##x(type, increment_type); \
- break;
- CASE(JSAdd)
- CASE(JSSubtract)
- CASE(NumberAdd)
- CASE(NumberSubtract)
- CASE(SpeculativeNumberAdd)
- CASE(SpeculativeNumberSubtract)
- CASE(SpeculativeSafeIntegerAdd)
- CASE(SpeculativeSafeIntegerSubtract)
+ case IrOpcode::k##x: \
+ type = Type##x(type, increment_type); \
+ break;
+ CASE(JSAdd)
+ CASE(JSSubtract)
+ CASE(NumberAdd)
+ CASE(NumberSubtract)
+ CASE(SpeculativeNumberAdd)
+ CASE(SpeculativeNumberSubtract)
+ CASE(SpeculativeSafeIntegerAdd)
+ CASE(SpeculativeSafeIntegerSubtract)
#undef CASE
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ // clang-format on
}
- // clang-format on
type = Type::Union(initial_type, type, typer_->zone());
@@ -2065,10 +2070,6 @@ Type Typer::Visitor::TypeStringLength(Node* node) {
Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
-Type Typer::Visitor::TypePoisonIndex(Node* node) {
- return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone());
-}
-
Type Typer::Visitor::TypeCheckBounds(Node* node) {
return typer_->operation_typer_.CheckBounds(Operand(node, 0),
Operand(node, 1));
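
[Editor's note] The first typer.cc hunk above makes induction-variable phi typing read the type of the increment operation's own input (Operand(node, 1)) and, when that type is None, skip the arithmetic typing entirely so that None propagates instead of feeding an empty type into TypeNumberAdd and friends; the second hunk removes TypePoisonIndex along with the rest of the poisoning support. A toy standalone model of the None-propagation rule, with types represented as optional closed intervals (nullopt playing the role of Type::None):

#include <algorithm>
#include <optional>
#include <utility>

using Interval = std::pair<double, double>;   // [lo, hi]
using DemoType = std::optional<Interval>;     // nullopt ~ Type::None (unreachable)

DemoType Add(DemoType a, DemoType b) {
  if (!a || !b) return std::nullopt;          // None is absorbing for arithmetic
  return Interval{a->first + b->first, a->second + b->second};
}

DemoType Union(DemoType a, DemoType b) {
  if (!a) return b;                           // union with None is the identity
  if (!b) return a;
  return Interval{std::min(a->first, b->first), std::max(a->second, b->second)};
}

// Simplified induction-variable phi typing: only run the increment arithmetic
// when the arithmetic input has a non-empty type, then union with the initial type.
DemoType TypeInductionPhi(DemoType initial, DemoType arith_input, DemoType increment) {
  DemoType stepped = arith_input ? Add(arith_input, increment) : std::nullopt;
  return Union(initial, stepped);
}

int main() {
  DemoType t = TypeInductionPhi(Interval{0, 0}, Interval{0, 10}, Interval{1, 1});
  return (t && t->second == 11) ? 0 : 1;
}
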
diff --git a/chromium/v8/src/compiler/types.cc b/chromium/v8/src/compiler/types.cc
index a1f9b93dce1..15c9f195e04 100644
--- a/chromium/v8/src/compiler/types.cc
+++ b/chromium/v8/src/compiler/types.cc
@@ -275,6 +275,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
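
[Editor's note] For context on the Lub hunk above: BitsetType::Lub maps a heap object's instance type to a bitset type, and the callable constructor-like instance types (now also JS_CLASS_CONSTRUCTOR_TYPE) all land in the same function bucket. Bitset types form a lattice in which union, intersection, and subtyping are plain bit operations; a minimal standalone sketch with invented bit assignments:

#include <cstdint>

using DemoBitset = uint32_t;
constexpr DemoBitset kNone = 0;
constexpr DemoBitset kNull = 1u << 0;
constexpr DemoBitset kNumber = 1u << 1;
constexpr DemoBitset kFunction = 1u << 2;  // all JSFunction-like maps share this bit

constexpr DemoBitset Union(DemoBitset a, DemoBitset b) { return a | b; }
constexpr DemoBitset Intersect(DemoBitset a, DemoBitset b) { return a & b; }
constexpr bool Is(DemoBitset a, DemoBitset b) { return (a & ~b) == 0; }  // a <= b

static_assert(Is(kFunction, Union(kFunction, kNumber)), "subtyping is bit inclusion");
static_assert(Intersect(kNull, kNumber) == kNone, "disjoint types meet at None");

int main() { return 0; }
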
diff --git a/chromium/v8/src/compiler/types.h b/chromium/v8/src/compiler/types.h
index a28a28c59e9..cdd8c0b0f08 100644
--- a/chromium/v8/src/compiler/types.h
+++ b/chromium/v8/src/compiler/types.h
@@ -72,10 +72,6 @@ namespace compiler {
// existing assumptions or tests.
// Consequently, do not normally use Equals for type tests, always use Is!
//
-// The NowIs operator implements state-sensitive subtying, as described above.
-// Any compilation decision based on such temporary properties requires runtime
-// guarding!
-//
//
// PROPERTIES
//
diff --git a/chromium/v8/src/compiler/verifier.cc b/chromium/v8/src/compiler/verifier.cc
index f33edaa6c0d..a8bbd06b5f2 100644
--- a/chromium/v8/src/compiler/verifier.cc
+++ b/chromium/v8/src/compiler/verifier.cc
@@ -919,7 +919,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
@@ -1422,10 +1422,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger);
CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger);
break;
- case IrOpcode::kPoisonIndex:
- CheckValueInputIs(node, 0, Type::Unsigned32());
- CheckTypeIs(node, Type::Unsigned32());
- break;
case IrOpcode::kCheckClosure:
// Any -> Function
CheckValueInputIs(node, 0, Type::Any());
@@ -1641,7 +1637,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1817,9 +1812,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
- case IrOpcode::kTaggedPoisonOnSpeculation:
- case IrOpcode::kWord32PoisonOnSpeculation:
- case IrOpcode::kWord64PoisonOnSpeculation:
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc
index f91c21fd1d4..8446640bfc8 100644
--- a/chromium/v8/src/compiler/wasm-compiler.cc
+++ b/chromium/v8/src/compiler/wasm-compiler.cc
@@ -44,6 +44,7 @@
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -196,14 +197,7 @@ class WasmGraphAssembler : public GraphAssembler {
return Call(call_descriptor, call_target, args...);
}
- void EnsureEnd() {
- if (graph()->end() == nullptr) {
- graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
- }
- }
-
void MergeControlToEnd(Node* node) {
- EnsureEnd();
NodeProperties::MergeControlToEnd(graph(), mcgraph()->common(), node);
}
@@ -212,7 +206,6 @@ class WasmGraphAssembler : public GraphAssembler {
if (FLAG_debug_code) {
auto ok = MakeLabel();
GotoIfNot(condition, &ok);
- EnsureEnd();
Unreachable();
Bind(&ok);
}
@@ -472,7 +465,6 @@ WasmGraphBuilder::WasmGraphBuilder(
mcgraph_(mcgraph),
env_(env),
has_simd_(ContainsSimd(sig)),
- untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
source_position_table_(source_position_table),
isolate_(isolate) {
@@ -501,6 +493,8 @@ void WasmGraphBuilder::Start(unsigned params) {
gasm_->LoadFunctionDataFromJSFunction(
Param(Linkage::kJSCallClosureParamIndex, "%closure")))
: Param(wasm::kWasmInstanceParameterIndex);
+
+ graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
}
Node* WasmGraphBuilder::Param(int index, const char* debug_name) {
@@ -660,8 +654,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
limit, effect()));
- Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
- stack_check.Chain(control());
+ Node* if_true;
+ Node* if_false;
+ gasm_->Branch(check, &if_true, &if_false, BranchHint::kTrue);
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
@@ -682,15 +677,18 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
- Node* call = graph()->NewNode(stack_check_call_operator_.get(),
- stack_check_code_node_.get(), effect(),
- stack_check.if_false);
-
+ Node* call =
+ graph()->NewNode(stack_check_call_operator_.get(),
+ stack_check_code_node_.get(), effect(), if_false);
SetSourcePosition(call, position);
- Node* ephi = stack_check.EffectPhi(effect(), call);
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
+ Node* merge = graph()->NewNode(mcgraph()->common()->Merge(2), if_true, call);
+ DCHECK_GT(call->op()->EffectOutputCount(), 0);
+ Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), effect(),
+ call, merge);
- SetEffectControl(ephi, stack_check.merge);
+ SetEffectControl(ephi, merge);
}
void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
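
[Editor's note] The StackCheck rewrite above replaces the Diamond helper with an explicit branch built through the graph assembler: the comparison selects a fast path (if_true) and a slow path (if_false) that calls the stack-guard stub; because that call now produces control, it feeds the two-input Merge directly, and an EffectPhi joins the incoming effect with the call's effect. A toy node-construction sketch of that shape, using plain structs rather than the V8 API:

#include <deque>
#include <string>
#include <vector>

// Toy sea-of-nodes node: an opcode plus mixed value/effect/control inputs.
struct DemoNode {
  std::string op;
  std::vector<DemoNode*> inputs;
};

struct DemoGraph {
  std::deque<DemoNode> arena;  // deque: stable addresses for node pointers
  DemoNode* NewNode(std::string op, std::vector<DemoNode*> inputs) {
    arena.push_back({std::move(op), std::move(inputs)});
    return &arena.back();
  }
};

// Shape installed by the rewritten stack check.
void BuildStackCheckShape(DemoGraph& g, DemoNode* check, DemoNode* effect,
                          DemoNode* control) {
  DemoNode* branch = g.NewNode("Branch", {check, control});
  DemoNode* if_true = g.NewNode("IfTrue", {branch});
  DemoNode* if_false = g.NewNode("IfFalse", {branch});
  DemoNode* call = g.NewNode("CallStackGuard", {effect, if_false});  // has a control output
  DemoNode* merge = g.NewNode("Merge", {if_true, call});
  DemoNode* ephi = g.NewNode("EffectPhi", {effect, call, merge});
  (void)ephi;  // the real builder continues with SetEffectControl(ephi, merge)
}

int main() {
  DemoGraph g;
  DemoNode* start = g.NewNode("Start", {});
  BuildStackCheckShape(g, g.NewNode("StackPointerGreaterThan", {start}), start, start);
}
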
@@ -2901,18 +2899,18 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(
- const wasm::FunctionSig* sig, base::Vector<Node*> args,
- base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline, Node* frame_state) {
- CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
- kWasmFunction, frame_state != nullptr);
+Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
+ base::Vector<Node*> rets,
+ wasm::WasmCodePosition position,
+ Node* instance_node, Node* frame_state) {
+ CallDescriptor* call_descriptor = GetWasmCallDescriptor(
+ mcgraph()->zone(), sig, kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
- // TODO(manoskouk): Don't always set control if we ever add properties to wasm
- // calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not set them as control.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
SetControl(call);
size_t ret_count = sig->return_count();
@@ -2935,15 +2933,14 @@ Node* WasmGraphBuilder::BuildWasmCall(
Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node,
- UseRetpoline use_retpoline) {
+ Node* instance_node) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
- // TODO(manoskouk): {call} will not always be a control node if we ever add
- // properties to wasm calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not merge them to end.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
gasm_->MergeControlToEnd(call);
return call;
@@ -2982,15 +2979,13 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Node* target_node = gasm_->LoadFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, ref_node);
case kReturnCall:
DCHECK(rets.empty());
- return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, ref_node);
}
}
@@ -3010,7 +3005,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline);
+ return BuildWasmCall(sig, args, rets, position, nullptr);
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
@@ -3095,16 +3090,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
- // Mask the key to prevent SSCA.
- if (untrusted_code_mitigations_) {
- // mask = ((key - size) & ~key) >> 31
- Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
- Node* masked_diff =
- gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
- Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
- key = gasm_->Word32And(key, mask);
- }
-
const wasm::ValueType table_type = env_->module->tables[table_index].type;
// Check that the table entry is not null and that the type of the function is
// **identical with** the function type declared at the call site (no
@@ -3140,16 +3125,12 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
intptr_scaled_key);
args[0] = target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, target_instance);
case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, target_instance);
}
}
@@ -3178,7 +3159,7 @@ Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
}
// TODO(9495): Support CAPI function refs.
-Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
CheckForNull null_check,
@@ -3189,8 +3170,6 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
position);
}
- const wasm::FunctionSig* sig = env_->module->signature(sig_index);
-
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(args[0]);
auto load_target = gasm_->MakeLabel();
@@ -3244,31 +3223,43 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
args[0] = end_label.PhiAt(0);
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
Node* call = continuation == kCallContinues
- ? BuildWasmCall(sig, args, rets, position, instance_node,
- use_retpoline)
- : BuildWasmReturnCall(sig, args, position, instance_node,
- use_retpoline);
+ ? BuildWasmCall(sig, args, rets, position, instance_node)
+ : BuildWasmReturnCall(sig, args, position, instance_node);
return call;
}
-Node* WasmGraphBuilder::CallRef(uint32_t sig_index, base::Vector<Node*> args,
+void WasmGraphBuilder::CompareToExternalFunctionAtIndex(
+ Node* func_ref, uint32_t function_index, Node** success_control,
+ Node** failure_control) {
+ // Since we are comparing to a function reference, it is guaranteed that
+ // instance->wasm_external_functions() has been initialized.
+ Node* external_functions = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), GetInstance(),
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kWasmExternalFunctionsOffset));
+ Node* function_ref = gasm_->LoadFixedArrayElement(
+ external_functions, gasm_->IntPtrConstant(function_index),
+ MachineType::AnyTagged());
+ gasm_->Branch(gasm_->WordEqual(function_ref, func_ref), success_control,
+ failure_control, BranchHint::kTrue);
+}
+
+Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
base::Vector<Node*> rets,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, rets, null_check,
- IsReturnCall::kCallContinues, position);
+ return BuildCallRef(sig, args, rets, null_check, IsReturnCall::kCallContinues,
+ position);
}
-Node* WasmGraphBuilder::ReturnCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, {}, null_check,
- IsReturnCall::kReturnCall, position);
+ return BuildCallRef(sig, args, {}, null_check, IsReturnCall::kReturnCall,
+ position);
}
Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
@@ -3287,7 +3278,7 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmReturnCall(sig, args, position, nullptr, kNoRetpoline);
+ return BuildWasmReturnCall(sig, args, position, nullptr);
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
@@ -3416,15 +3407,6 @@ void WasmGraphBuilder::InitInstanceCache(
// Load the memory size.
instance_cache->mem_size =
LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
-
- if (untrusted_code_mitigations_) {
- // Load the memory mask.
- instance_cache->mem_mask =
- LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr());
- } else {
- // Explicitly set to nullptr to ensure a SEGV when we try to use it.
- instance_cache->mem_mask = nullptr;
- }
}
void WasmGraphBuilder::PrepareInstanceCacheForLoop(
@@ -3435,10 +3417,6 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop(
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation());
- }
-
#undef INTRODUCE_PHI
}
@@ -3453,10 +3431,6 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
- }
-
#undef INTRODUCE_PHI
}
@@ -3467,10 +3441,6 @@ void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
- if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
- merge, to->mem_mask, from->mem_mask);
- }
}
Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
@@ -3623,7 +3593,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
Node* centry_stub =
gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
- IsolateData::builtin_slot_offset(centry_id));
+ IsolateData::BuiltinSlotOffset(centry_id));
// TODO(titzer): allow arbitrary number of runtime arguments
// At the moment we only allow 5 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3839,13 +3809,6 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// Introduce the actual bounds check.
Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-
- if (untrusted_code_mitigations_) {
- // In the fallthrough case, condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
return {index, kDynamicallyChecked};
}
@@ -4345,13 +4308,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
-
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
index, effect(), bounds_check.if_true);
SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge);
@@ -4396,13 +4352,6 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->Word32And(index, mem_mask);
- }
-
index = BuildChangeUint32ToUintPtr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
@@ -5240,16 +5189,26 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const Operator* (MachineOperatorBuilder::*)(MachineType);
using OperatorByRep =
const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+ using OperatorByAtomicLoadRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
+ using OperatorByAtomicStoreRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
const Type type;
const MachineType machine_type;
const OperatorByType operator_by_type = nullptr;
const OperatorByRep operator_by_rep = nullptr;
+ const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
+ const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
+ : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
+ : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5358,11 +5317,21 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (info.type != AtomicOpInfo::kSpecial) {
- const Operator* op =
- info.operator_by_type
- ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
- : (mcgraph()->machine()->*info.operator_by_rep)(
- info.machine_type.representation());
+ const Operator* op;
+ if (info.operator_by_type) {
+ op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
+ } else if (info.operator_by_rep) {
+ op = (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+ } else if (info.operator_by_atomic_load_params) {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
+ AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+ } else {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
+ AtomicStoreParameters(info.machine_type.representation(),
+ WriteBarrierKind::kNoWriteBarrier,
+ AtomicMemoryOrder::kSeqCst));
+ }
Node* input_nodes[6] = {MemBuffer(capped_offset), index};
int num_actual_inputs = info.type;
@@ -5610,13 +5579,17 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
wasm::WasmCodePosition position) {
TrapIfFalse(wasm::kTrapArrayTooLarge,
gasm_->Uint32LessThanOrEqual(
- length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
+ length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
- Node* a =
- gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ // Do NOT mark this as Operator::kEliminatable, because that would cause the
+ // Call node to have no control inputs, which means it could get scheduled
+ // before the check/trap above.
+ Node* a = gasm_->CallBuiltin(
+ stub, Operator::kNoDeopt | Operator::kNoThrow, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
// TODO(manoskouk): If the loop is ever removed here, we have to update
// ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
@@ -5628,8 +5601,6 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
- // Loops need the graph's end to have been set up.
- gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
@@ -5646,6 +5617,25 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
+Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
+ const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements) {
+ wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
+ Node* array =
+ gasm_->CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
+ Operator::kNoDeopt | Operator::kNoThrow, rtt,
+ Int32Constant(static_cast<int32_t>(elements.size())),
+ Int32Constant(element_type.element_size_bytes()));
+ for (int i = 0; i < static_cast<int>(elements.size()); i++) {
+ Node* offset =
+ gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
+ gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
+ elements[i]);
+ }
+ return array;
+}
+
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
@@ -6005,24 +5995,40 @@ Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
return gasm_->LoadWasmArrayLength(array_object);
}
-// TODO(7748): Change {CallBuiltin} to {BuildCCall}. Add an option to copy in a
-// loop for small array sizes. To find the length limit, run
-// test/mjsunit/wasm/array-copy-benchmark.js.
+// TODO(7748): Add an option to copy in a loop for small array sizes. To find
+// the length limit, run test/mjsunit/wasm/array-copy-benchmark.js.
void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
- Node* src_array, Node* src_index, Node* length,
+ CheckForNull dst_null_check, Node* src_array,
+ Node* src_index, CheckForNull src_null_check,
+ Node* length,
wasm::WasmCodePosition position) {
- // TODO(7748): Skip null checks when possible.
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(dst_array, RefNull()),
- position);
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(src_array, RefNull()),
- position);
+ if (dst_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(dst_array, RefNull()), position);
+ }
+ if (src_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(src_array, RefNull()), position);
+ }
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
- Operator::Properties copy_properties =
- Operator::kIdempotent | Operator::kNoThrow | Operator::kNoDeopt;
- // The builtin needs the int parameters first.
- gasm_->CallBuiltin(Builtin::kWasmArrayCopy, copy_properties, dst_index,
- src_index, length, dst_array, src_array);
+
+ auto skip = gasm_->MakeLabel();
+
+ gasm_->GotoIf(gasm_->WordEqual(length, Int32Constant(0)), &skip,
+ BranchHint::kFalse);
+
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::Uint32()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
+ src_index, length);
+ gasm_->Goto(&skip);
+ gasm_->Bind(&skip);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
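
[Editor's note] The rewritten ArrayCopy above stops calling the Builtin::kWasmArrayCopy stub and instead, after the optional null checks, the bounds checks, and a skip for length == 0, performs a direct C call through ExternalReference::wasm_array_copy() with an explicit six-argument MachineSignature (instance, destination array, destination index, source array, source index, length). The C side of that call is not part of this diff; a hypothetical standalone helper with the same shape, for a flat array of 32-bit elements, might look like the sketch below (the real runtime function has to deal with tagged elements and write barriers):

#include <cstdint>
#include <cstring>

// Hypothetical flat-array copy helper; dst and src may refer to the same
// array, so use memmove semantics to handle overlapping ranges.
extern "C" void DemoWasmArrayCopy(uint32_t* dst, uint32_t dst_index,
                                  uint32_t* src, uint32_t src_index,
                                  uint32_t length) {
  if (length == 0) return;  // the generated code also skips the call in this case
  std::memmove(dst + dst_index, src + src_index, length * sizeof(uint32_t));
}
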
@@ -6659,8 +6665,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The (cached) call target is the jump table slot for that function.
args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets),
- wasm::kNoCodePosition, nullptr, kNoRetpoline,
- frame_state);
+ wasm::kNoCodePosition, nullptr, frame_state);
}
}
@@ -6929,8 +6934,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
@@ -6957,8 +6963,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = wasm_count; i < expected_arity; ++i) {
args[pos++] = undefined_node;
}
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6981,7 +6988,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] =
gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7162,8 +7170,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int pos = 0;
args[pos++] = gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = Int32Constant(wasm_count); // argument count
- args[pos++] = UndefinedValue(); // receiver
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
+ args[pos++] = UndefinedValue(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
@@ -7457,7 +7466,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
- if (shared->internal_formal_parameter_count() ==
+ if (shared->internal_formal_parameter_count_without_receiver() ==
expected_sig->parameter_count()) {
return std::make_pair(WasmImportCallKind::kJSFunctionArityMatch,
callable);
@@ -7538,7 +7547,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::CompilationEnv env(
nullptr, wasm::kNoBoundsChecks,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
- wasm::WasmFeatures::All());
+ wasm::WasmFeatures::All(), wasm::DynamicTiering::kDisabled);
WasmGraphBuilder builder(&env, mcgraph->zone(), mcgraph, sig,
source_positions);
@@ -7569,11 +7578,12 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_FUNCTION,
- wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
- source_positions);
- return result;
+ // The code does not call to JS, but conceptually it is an import wrapper,
+ // hence use {WASM_TO_JS_FUNCTION} here.
+ // TODO(wasm): Rename this to {WASM_IMPORT_CALL}?
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ call_descriptor, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, debug_name,
+ WasmStubAssemblerOptions(), source_positions);
}
} // namespace
@@ -7623,17 +7633,13 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
// Schedule and compile to machine code.
CallDescriptor* incoming =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper);
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION,
- wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
- source_position_table);
- result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
- return result;
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name,
+ WasmStubAssemblerOptions(), source_position_table);
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
@@ -7665,24 +7671,27 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
// Run the compiler pipeline to generate machine code.
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmCapiFunction);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmCapiFunction);
if (mcgraph->machine()->Is32()) {
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
const char* debug_name = "WasmCapiCall";
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
- wasm::WasmCode::kWasmToCapiWrapper, debug_name,
+ call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION, debug_name,
WasmStubAssemblerOptions(), source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
- return native_module->PublishCode(std::move(wasm_code));
+ wasm::WasmCode* published_code;
+ {
+ wasm::CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
+ return published_code;
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
@@ -7716,8 +7725,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
// Generate the call descriptor.
CallDescriptor* incoming =
- GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper);
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
@@ -7853,7 +7861,7 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
auto* allocator = wasm::GetWasmEngine()->allocator();
wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins, func_index);
+ func_body, loop_infos, node_origins, func_index, wasm::kRegularFunction);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7886,8 +7894,9 @@ base::Vector<const char> GetDebugName(Zone* zone, int index) {
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
- int func_index, Counters* counters, wasm::WasmFeatures* detected) {
+ wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
+ wasm::WasmFeatures* detected) {
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileTopTier", "func_index", func_index, "body_size",
func_body.end - func_body.start);
@@ -7936,12 +7945,14 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
}
if (ContainsSimd(func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
- call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
+ // Fail compilation if hardware does not support SIMD.
+ return wasm::WasmCompilationResult{};
}
- Pipeline::GenerateCodeForWasmFunction(
- &info, mcgraph, call_descriptor, source_positions, node_origins,
- func_body, env->module, func_index, &loop_infos);
+ Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
+ call_descriptor, source_positions,
+ node_origins, func_body, env->module,
+ func_index, &loop_infos);
if (counters) {
int zone_bytes =
@@ -7997,10 +8008,9 @@ class LinkageLocationAllocator {
} // namespace
// General code uses the above configuration data.
-CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
- bool need_frame_state) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
+ WasmCallKind call_kind,
+ bool need_frame_state) {
   // The extra here is to accommodate the instance object as first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8078,10 +8088,9 @@ CallDescriptor* GetWasmCallDescriptor(
descriptor_kind = CallDescriptor::kCallWasmCapiFunction;
}
- CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline
- : need_frame_state ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags = need_frame_state
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h
index 71e3111c8c6..ad33c7e1c69 100644
--- a/chromium/v8/src/compiler/wasm-compiler.h
+++ b/chromium/v8/src/compiler/wasm-compiler.h
@@ -53,13 +53,15 @@ using TFNode = compiler::Node;
using TFGraph = compiler::MachineGraph;
class WasmCode;
class WasmFeatures;
+class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
} // namespace wasm
namespace compiler {
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv*, const wasm::FunctionBody&, int func_index, Counters*,
+ wasm::CompilationEnv*, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody&, int func_index, Counters*,
wasm::WasmFeatures* detected);
// Calls to Wasm imports are handled in several different ways, depending on the
@@ -176,7 +178,6 @@ class JSWasmCallData {
struct WasmInstanceCacheNodes {
Node* mem_start;
Node* mem_size;
- Node* mem_mask;
};
struct WasmLoopInfo {
@@ -207,10 +208,6 @@ class WasmGraphBuilder {
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum UseRetpoline : bool { // --
- kRetpoline = true,
- kNoRetpoline = false
- };
enum CheckForNull : bool { // --
kWithNullCheck = true,
kWithoutNullCheck = false
@@ -328,16 +325,19 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args, base::Vector<Node*> rets,
wasm::WasmCodePosition position);
- Node* CallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* CallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
wasm::WasmCodePosition position);
+ void CompareToExternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
+ Node** success_control,
+ Node** failure_control);
Node* ReturnCall(uint32_t index, base::Vector<Node*> args,
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args,
wasm::WasmCodePosition position);
- Node* ReturnCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* ReturnCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
CheckForNull null_check, wasm::WasmCodePosition position);
void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
@@ -474,9 +474,11 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position);
- void ArrayCopy(Node* dst_array, Node* dst_index, Node* src_array,
- Node* src_index, Node* length,
- wasm::WasmCodePosition position);
+ void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
+ Node* src_array, Node* src_index, CheckForNull src_null_check,
+ Node* length, wasm::WasmCodePosition position);
+ Node* ArrayInit(uint32_t array_index, const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -576,12 +578,11 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline,
- Node* frame_state = nullptr);
+ Node* instance_node, Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node);
Node* BuildImportCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position, int func_index,
@@ -590,7 +591,7 @@ class WasmGraphBuilder {
base::Vector<Node*> rets,
wasm::WasmCodePosition position, Node* func_index,
IsReturnCall continuation);
- Node* BuildCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* BuildCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
IsReturnCall continuation,
wasm::WasmCodePosition position);
@@ -765,7 +766,6 @@ class WasmGraphBuilder {
bool use_js_isolate_and_params() const { return isolate_ != nullptr; }
bool has_simd_ = false;
bool needs_stack_check_ = false;
- const bool untrusted_code_mitigations_ = true;
const wasm::FunctionSig* const sig_;
@@ -791,8 +791,6 @@ V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
- WasmGraphBuilder::UseRetpoline use_retpoline =
- WasmGraphBuilder::kNoRetpoline,
WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
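
[Editor's note] With UseRetpoline gone, the remaining bool-backed enums in this header (such as CheckForNull, visible in the hunk above) keep call sites self-documenting: the new ArrayCopy and call-building signatures pass kWithNullCheck / kWithoutNullCheck rather than bare true / false. A standalone illustration of the idiom, reusing the enum's shape with an invented function:

#include <iostream>

// Bool-backed enum, as declared in wasm-compiler.h.
enum CheckForNull : bool { kWithNullCheck = true, kWithoutNullCheck = false };

void DemoCopyArray(int dst, CheckForNull dst_null_check,
                   int src, CheckForNull src_null_check) {
  if (dst_null_check == kWithNullCheck) std::cout << "trap if dst is null\n";
  if (src_null_check == kWithNullCheck) std::cout << "trap if src is null\n";
  // ... bounds checks and the actual copy would follow ...
}

int main() {
  // Reads far better than DemoCopyArray(0, true, 1, false).
  DemoCopyArray(/*dst=*/0, kWithNullCheck, /*src=*/1, kWithoutNullCheck);
}
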
diff --git a/chromium/v8/src/compiler/wasm-inlining.cc b/chromium/v8/src/compiler/wasm-inlining.cc
new file mode 100644
index 00000000000..965b467d677
--- /dev/null
+++ b/chromium/v8/src/compiler/wasm-inlining.cc
@@ -0,0 +1,311 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-inlining.h"
+
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction WasmInliner::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCall:
+ case IrOpcode::kTailCall:
+ return ReduceCall(node);
+ default:
+ return NoChange();
+ }
+}
+
+// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
+// tail calls.
+// TODO(12166): Inline indirect calls/call_ref.
+Reduction WasmInliner::ReduceCall(Node* call) {
+ DCHECK(call->opcode() == IrOpcode::kCall ||
+ call->opcode() == IrOpcode::kTailCall);
+ Node* callee = NodeProperties::GetValueInput(call, 0);
+ IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
+ ? IrOpcode::kRelocatableInt32Constant
+ : IrOpcode::kRelocatableInt64Constant;
+ if (callee->opcode() != reloc_opcode) return NoChange();
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ uint32_t inlinee_index = static_cast<uint32_t>(info.value());
+ if (!heuristics_->DoInline(source_positions_->GetSourcePosition(call),
+ inlinee_index)) {
+ return NoChange();
+ }
+
+ CHECK_LT(inlinee_index, module()->functions.size());
+ const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
+
+ base::Vector<const byte> function_bytes = wire_bytes_->GetCode(inlinee->code);
+
+ const wasm::FunctionBody inlinee_body(inlinee->sig, inlinee->code.offset(),
+ function_bytes.begin(),
+ function_bytes.end());
+ wasm::WasmFeatures detected;
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
+ source_positions_);
+ std::vector<WasmLoopInfo> infos;
+
+ size_t subgraph_min_node_id = graph()->NodeCount();
+ wasm::DecodeResult result;
+ Node* inlinee_start;
+ Node* inlinee_end;
+ {
+ Graph::SubgraphScope scope(graph());
+ result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
+ module(), &builder, &detected, inlinee_body,
+ &infos, node_origins_, inlinee_index,
+ wasm::kInlinedFunction);
+ inlinee_start = graph()->start();
+ inlinee_end = graph()->end();
+ }
+
+ if (result.failed()) return NoChange();
+ return call->opcode() == IrOpcode::kCall
+ ? InlineCall(call, inlinee_start, inlinee_end, inlinee->sig,
+ subgraph_min_node_id)
+ : InlineTailCall(call, inlinee_start, inlinee_end);
+}
+
+/* Rewire callee formal parameters to the call-site real parameters. Rewire
+ * effect and control dependencies of callee's start node with the respective
+ * inputs of the call node.
+ */
+void WasmInliner::RewireFunctionEntry(Node* call, Node* callee_start) {
+ Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
+
+ for (Edge edge : callee_start->use_edges()) {
+ Node* use = edge.from();
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ // Index 0 is the callee node.
+ int index = 1 + ParameterIndexOf(use->op());
+ Replace(use, NodeProperties::GetValueInput(call, index));
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
+}
+
+Reduction WasmInliner::InlineTailCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK(call->opcode() == IrOpcode::kTailCall);
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+ // 2) For tail calls, all we have to do is rewire all terminators of the
+ // inlined graph to the end of the caller graph.
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ }
+ callee_end->Kill();
+ return Replace(mcgraph()->Dead());
+}
+
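
[Editor's note] InlineCall below performs inlining at the graph level: uses of the callee's Parameter nodes are rewired to the call's arguments, the callee's Return nodes are merged into a single value/effect/control join that replaces the call, and throwing nodes are re-attached to the caller's exception handler. A much-reduced standalone model of the parameter-substitution step on an expression tree (no effects, no control, invented toy IR):

#include <memory>
#include <vector>

// Toy IR: constants, parameters, adds, and calls.
struct Expr {
  enum Kind { kConst, kParam, kAdd, kCall } kind;
  int value = 0;                    // kConst: constant value, kParam: parameter index
  std::vector<const Expr*> inputs;  // kAdd/kCall operands
};

// "Inline" a callee body by copying it with parameters substituted by the
// call's arguments -- the tree analogue of rewiring the Start node's uses.
const Expr* Inline(const Expr* body, const std::vector<const Expr*>& args,
                   std::vector<std::unique_ptr<Expr>>& arena) {
  switch (body->kind) {
    case Expr::kParam:
      return args[body->value];  // rewire Parameter to the call argument
    case Expr::kConst:
      return body;               // constants can be shared as-is
    default: {
      auto copy = std::make_unique<Expr>(*body);
      for (const Expr*& input : copy->inputs) input = Inline(input, args, arena);
      arena.push_back(std::move(copy));
      return arena.back().get();
    }
  }
}

int main() {
  Expr p0{Expr::kParam, 0, {}};
  Expr one{Expr::kConst, 1, {}};
  Expr body{Expr::kAdd, 0, {&p0, &one}};  // callee computes param0 + 1
  Expr arg{Expr::kConst, 41, {}};
  std::vector<std::unique_ptr<Expr>> arena;
  const Expr* inlined = Inline(&body, {&arg}, arena);
  return inlined->inputs[0]->value == 41 ? 0 : 1;
}
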
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id) {
+ DCHECK(call->opcode() == IrOpcode::kCall);
+
+ // 0) Before doing anything, if {call} has an exception handler, collect all
+ // unhandled calls in the subgraph.
+ Node* handler = nullptr;
+ std::vector<Node*> unhandled_subcalls;
+ if (NodeProperties::IsExceptionalCall(call, &handler)) {
+ AllNodes subgraph_nodes(zone(), callee_end, graph());
+ for (Node* node : subgraph_nodes.reachable) {
+ if (node->id() >= subgraph_min_node_id &&
+ !node->op()->HasProperty(Operator::kNoThrow) &&
+ !NodeProperties::IsExceptionalCall(node)) {
+ unhandled_subcalls.push_back(node);
+ }
+ }
+ }
+
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+
+ // 2) Handle all graph terminators for the callee.
+ NodeVector return_nodes(zone());
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ // Returns are collected to be rewired into the caller graph later.
+ return_nodes.push_back(input);
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ break;
+ case IrOpcode::kTailCall: {
+ // A tail call in the callee inlined in a regular call in the caller has
+ // to be transformed into a regular call, and then returned from the
+ // inlinee. It will then be handled like any other return.
+ auto descriptor = CallDescriptorOf(input->op());
+ NodeProperties::ChangeOp(input, common()->Call(descriptor));
+ int return_arity = static_cast<int>(inlinee_sig->return_count());
+ NodeVector return_inputs(zone());
+ // The first input of a return node is always the 0 constant.
+ return_inputs.push_back(graph()->NewNode(common()->Int32Constant(0)));
+ if (return_arity == 1) {
+ return_inputs.push_back(input);
+ } else if (return_arity > 1) {
+ for (int i = 0; i < return_arity; i++) {
+ return_inputs.push_back(
+ graph()->NewNode(common()->Projection(i), input, input));
+ }
+ }
+
+ // Add effect and control inputs.
+ return_inputs.push_back(input->op()->EffectOutputCount() > 0
+ ? input
+ : NodeProperties::GetEffectInput(input));
+ return_inputs.push_back(input->op()->ControlOutputCount() > 0
+ ? input
+ : NodeProperties::GetControlInput(input));
+
+ Node* ret = graph()->NewNode(common()->Return(return_arity),
+ static_cast<int>(return_inputs.size()),
+ return_inputs.data());
+ return_nodes.push_back(ret);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ callee_end->Kill();
+
+ // 3) Rewire unhandled calls to the handler.
+ std::vector<Node*> on_exception_nodes;
+ for (Node* subcall : unhandled_subcalls) {
+ Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
+ NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
+ NodeProperties::ReplaceControlInput(on_success, subcall);
+ Node* on_exception =
+ graph()->NewNode(common()->IfException(), subcall, subcall);
+ on_exception_nodes.push_back(on_exception);
+ }
+
+ int subcall_count = static_cast<int>(on_exception_nodes.size());
+
+ if (subcall_count > 0) {
+ Node* control_output =
+ graph()->NewNode(common()->Merge(subcall_count), subcall_count,
+ on_exception_nodes.data());
+ on_exception_nodes.push_back(control_output);
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, subcall_count),
+ subcall_count + 1, on_exception_nodes.data());
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(subcall_count), subcall_count + 1,
+ on_exception_nodes.data());
+ ReplaceWithValue(handler, value_output, effect_output, control_output);
+ } else if (handler != nullptr) {
+ // Nothing in the inlined function can throw. Remove the handler.
+ ReplaceWithValue(handler, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ }
+
+ if (return_nodes.size() > 0) {
+ /* 4) Collect all return site value, effect, and control inputs into phis
+ * and merges. */
+ int const return_count = static_cast<int>(return_nodes.size());
+ NodeVector controls(zone());
+ NodeVector effects(zone());
+ for (Node* const return_node : return_nodes) {
+ controls.push_back(NodeProperties::GetControlInput(return_node));
+ effects.push_back(NodeProperties::GetEffectInput(return_node));
+ }
+ Node* control_output = graph()->NewNode(common()->Merge(return_count),
+ return_count, &controls.front());
+ effects.push_back(control_output);
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(return_count),
+ static_cast<int>(effects.size()), &effects.front());
+
+ // The first input of a return node is discarded. This is because Wasm
+ // functions always return an additional 0 constant as a first return value.
+ DCHECK(
+ Int32Matcher(NodeProperties::GetValueInput(return_nodes[0], 0)).Is(0));
+ int const return_arity = return_nodes[0]->op()->ValueInputCount() - 1;
+ NodeVector values(zone());
+ for (int i = 0; i < return_arity; i++) {
+ NodeVector ith_values(zone());
+ for (Node* const return_node : return_nodes) {
+ Node* value = NodeProperties::GetValueInput(return_node, i + 1);
+ ith_values.push_back(value);
+ }
+ ith_values.push_back(control_output);
+ // Find the correct machine representation for the return values from the
+ // inlinee signature.
+ MachineRepresentation repr =
+ inlinee_sig->GetReturn(i).machine_representation();
+ Node* ith_value_output = graph()->NewNode(
+ common()->Phi(repr, return_count),
+ static_cast<int>(ith_values.size()), &ith_values.front());
+ values.push_back(ith_value_output);
+ }
+ for (Node* return_node : return_nodes) return_node->Kill();
+
+ if (return_arity == 0) {
+ // Void function, no value uses.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ } else if (return_arity == 1) {
+ // One return value. Just replace value uses of the call node with it.
+ ReplaceWithValue(call, values[0], effect_output, control_output);
+ } else {
+ // Multiple returns. We have to find the projections of the call node and
+ // replace them with the returned values.
+ for (Edge use_edge : call->use_edges()) {
+ if (NodeProperties::IsValueEdge(use_edge)) {
+ Node* use = use_edge.from();
+ DCHECK_EQ(use->opcode(), IrOpcode::kProjection);
+ ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
+ }
+ }
+ // All value inputs are replaced by the above loop, so it is ok to use
+ // Dead() as a dummy for value replacement.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ }
+ return Replace(mcgraph()->Dead());
+ } else {
+ // The callee can never return. The call node and all its uses are dead.
+ ReplaceWithValue(call, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ return Changed(call);
+ }
+}
+
+const wasm::WasmModule* WasmInliner::module() const { return env_->module; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
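
As an aside on the mechanics above: a call node's value inputs are laid out as [callee, arg0, arg1, ...], which is why RewireFunctionEntry maps Parameter(i) of the inlinee to value input 1 + i of the call. The standalone C++ sketch below (illustrative only, not V8 API; all names are made up) demonstrates that index mapping.

// Standalone sketch (not part of this patch): models a call site as a flat
// input list [callee, arg0, arg1, ...] and shows the Parameter(i) ->
// input[1 + i] mapping used when rewiring the inlinee entry.
#include <cassert>
#include <string>
#include <vector>

int main() {
  // Hypothetical call node inputs: index 0 is the callee, then the arguments.
  std::vector<std::string> call_inputs = {"callee", "arg0", "arg1", "arg2"};
  for (int param_index = 0; param_index < 3; ++param_index) {
    // Parameter i of the inlinee reads value input 1 + i of the call.
    const std::string& replacement = call_inputs[1 + param_index];
    assert(replacement == "arg" + std::to_string(param_index));
  }
  return 0;
}
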
diff --git a/chromium/v8/src/compiler/wasm-inlining.h b/chromium/v8/src/compiler/wasm-inlining.h
new file mode 100644
index 00000000000..b63e232198e
--- /dev/null
+++ b/chromium/v8/src/compiler/wasm-inlining.h
@@ -0,0 +1,108 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_INLINING_H_
+#define V8_COMPILER_WASM_INLINING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+struct CompilationEnv;
+struct WasmModule;
+struct WasmFunction;
+class WireBytesStorage;
+} // namespace wasm
+
+class BytecodeOffset;
+class OptimizedCompilationInfo;
+
+namespace compiler {
+
+class NodeOriginTable;
+class SourcePositionTable;
+
+// Parent class for classes that provide heuristics on how to inline in wasm.
+class WasmInliningHeuristics {
+ public:
+ virtual bool DoInline(SourcePosition position,
+ uint32_t function_index) const = 0;
+};
+
+// A simple inlining heuristic that inlines all function calls to a set of given
+// function indices.
+class InlineByIndex : public WasmInliningHeuristics {
+ public:
+ explicit InlineByIndex(uint32_t function_index)
+ : WasmInliningHeuristics(), function_indices_(function_index) {}
+ InlineByIndex(std::initializer_list<uint32_t> function_indices)
+ : WasmInliningHeuristics(), function_indices_(function_indices) {}
+
+ bool DoInline(SourcePosition position,
+ uint32_t function_index) const override {
+ return function_indices_.count(function_index) > 0;
+ }
+
+ private:
+ std::unordered_set<uint32_t> function_indices_;
+};
+
+// The WasmInliner provides the core graph inlining machinery for WebAssembly
+// graphs. Note that this class only deals with the mechanics of how to inline
+// one graph into another; heuristics that decide what and how much to inline
+// are provided by {WasmInliningHeuristics}.
+class WasmInliner final : public AdvancedReducer {
+ public:
+ WasmInliner(Editor* editor, wasm::CompilationEnv* env,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins, MachineGraph* mcgraph,
+ const wasm::WireBytesStorage* wire_bytes,
+ const WasmInliningHeuristics* heuristics)
+ : AdvancedReducer(editor),
+ env_(env),
+ source_positions_(source_positions),
+ node_origins_(node_origins),
+ mcgraph_(mcgraph),
+ wire_bytes_(wire_bytes),
+ heuristics_(heuristics) {}
+
+ const char* reducer_name() const override { return "WasmInliner"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Zone* zone() const { return mcgraph_->zone(); }
+ CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+ Graph* graph() const { return mcgraph_->graph(); }
+ MachineGraph* mcgraph() const { return mcgraph_; }
+ const wasm::WasmModule* module() const;
+ const wasm::WasmFunction* inlinee() const;
+
+ Reduction ReduceCall(Node* call);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id);
+ Reduction InlineTailCall(Node* call, Node* callee_start, Node* callee_end);
+ void RewireFunctionEntry(Node* call, Node* callee_start);
+
+ wasm::CompilationEnv* const env_;
+ SourcePositionTable* const source_positions_;
+ NodeOriginTable* const node_origins_;
+ MachineGraph* const mcgraph_;
+ const wasm::WireBytesStorage* const wire_bytes_;
+ const WasmInliningHeuristics* const heuristics_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_INLINING_H_
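
To clarify how a WasmInliningHeuristics-style predicate gates inlining decisions, here is a minimal standalone analogue of the index-set heuristic declared above. It does not use V8 types; the class and function names are illustrative only.

// Standalone sketch mirroring the InlineByIndex idea: a predicate over
// function indices decides which call sites are inlined. Not V8 API.
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <unordered_set>

class InliningPredicate {
 public:
  virtual ~InliningPredicate() = default;
  virtual bool DoInline(uint32_t function_index) const = 0;
};

class InlineByIndexSet final : public InliningPredicate {
 public:
  InlineByIndexSet(std::initializer_list<uint32_t> indices)
      : indices_(indices) {}
  bool DoInline(uint32_t function_index) const override {
    return indices_.count(function_index) > 0;
  }

 private:
  std::unordered_set<uint32_t> indices_;
};

int main() {
  InlineByIndexSet heuristics{3, 7};
  assert(heuristics.DoInline(3));
  assert(!heuristics.DoInline(4));
  return 0;
}
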
diff --git a/chromium/v8/src/d8/async-hooks-wrapper.cc b/chromium/v8/src/d8/async-hooks-wrapper.cc
index 84191b98154..13b67ce8ea0 100644
--- a/chromium/v8/src/d8/async-hooks-wrapper.cc
+++ b/chromium/v8/src/d8/async-hooks-wrapper.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/d8/async-hooks-wrapper.h"
+
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/d8/d8.h"
#include "src/execution/isolate-inl.h"
@@ -120,66 +125,74 @@ Local<Object> AsyncHooks::CreateHook(
void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent) {
- AsyncHooks* hooks =
- PerIsolateData::Get(promise->GetIsolate())->GetAsyncHooks();
-
- HandleScope handle_scope(hooks->isolate_);
-
- Local<Context> currentContext = hooks->isolate_->GetCurrentContext();
- DCHECK(!currentContext.IsEmpty());
+ v8::Isolate* isolate = promise->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (type == PromiseHookType::kInit) {
- ++hooks->current_async_id;
- Local<Integer> async_id =
- Integer::New(hooks->isolate_, hooks->current_async_id);
-
- CHECK(!promise
- ->HasPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_))
+ AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
+ HandleScope handle_scope(isolate);
+ // Temporarily clear any scheduled_exception to allow evaluating JS that can
+ // throw.
+ i::Handle<i::Object> scheduled_exception;
+ if (i_isolate->has_scheduled_exception()) {
+ scheduled_exception = handle(i_isolate->scheduled_exception(), i_isolate);
+ i_isolate->clear_scheduled_exception();
+ }
+ {
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+
+ Local<Context> currentContext = isolate->GetCurrentContext();
+ DCHECK(!currentContext.IsEmpty());
+
+ if (type == PromiseHookType::kInit) {
+ ++hooks->current_async_id;
+ Local<Integer> async_id = Integer::New(isolate, hooks->current_async_id);
+ CHECK(
+ !promise->HasPrivate(currentContext, hooks->async_id_smb.Get(isolate))
.ToChecked());
- promise->SetPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_), async_id);
-
- if (parent->IsPromise()) {
- Local<Promise> parent_promise = parent.As<Promise>();
- Local<Value> parent_async_id =
- parent_promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked();
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- parent_async_id);
- } else {
- CHECK(parent->IsUndefined());
- Local<Integer> trigger_id = Integer::New(hooks->isolate_, 0);
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- trigger_id);
+ promise->SetPrivate(currentContext, hooks->async_id_smb.Get(isolate),
+ async_id);
+
+ if (parent->IsPromise()) {
+ Local<Promise> parent_promise = parent.As<Promise>();
+ Local<Value> parent_async_id =
+ parent_promise
+ ->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked();
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ parent_async_id);
+ } else {
+ CHECK(parent->IsUndefined());
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ Integer::New(isolate, 0));
+ }
+ } else if (type == PromiseHookType::kBefore) {
+ AsyncContext ctx;
+ ctx.execution_async_id =
+ promise->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ ctx.trigger_async_id =
+ promise
+ ->GetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ hooks->asyncContexts.push(ctx);
+ } else if (type == PromiseHookType::kAfter) {
+ hooks->asyncContexts.pop();
+ }
+ if (!i::StackLimitCheck{i_isolate}.HasOverflowed()) {
+ for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
+ PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (try_catch.HasCaught()) break;
+ }
+ if (try_catch.HasCaught()) Shell::ReportException(isolate, &try_catch);
}
- } else if (type == PromiseHookType::kBefore) {
- AsyncContext ctx;
- ctx.execution_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- ctx.trigger_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- hooks->asyncContexts.push(ctx);
- } else if (type == PromiseHookType::kAfter) {
- hooks->asyncContexts.pop();
}
-
- for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
- PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (!scheduled_exception.is_null()) {
+ i_isolate->set_scheduled_exception(*scheduled_exception);
}
}
@@ -215,28 +228,14 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
Local<Promise> promise,
Local<Value> parent, AsyncHooksWrap* wrap,
AsyncHooks* hooks) {
- if (!wrap->IsEnabled()) {
- return;
- }
+ if (!wrap->IsEnabled()) return;
+ v8::Isolate* v8_isolate = hooks->isolate_;
+ HandleScope handle_scope(v8_isolate);
- HandleScope handle_scope(hooks->isolate_);
-
- TryCatch try_catch(hooks->isolate_);
- try_catch.SetVerbose(true);
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(hooks->isolate_);
- if (isolate->has_scheduled_exception()) {
- isolate->ScheduleThrow(isolate->scheduled_exception());
-
- DCHECK(try_catch.HasCaught());
- Shell::ReportException(hooks->isolate_, &try_catch);
- return;
- }
-
- Local<Value> rcv = Undefined(hooks->isolate_);
- Local<Context> context = hooks->isolate_->GetCurrentContext();
+ Local<Value> rcv = Undefined(v8_isolate);
+ Local<Context> context = v8_isolate->GetCurrentContext();
Local<Value> async_id =
- promise->GetPrivate(context, hooks->async_id_smb.Get(hooks->isolate_))
+ promise->GetPrivate(context, hooks->async_id_smb.Get(v8_isolate))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
@@ -245,28 +244,31 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
MaybeLocal<Value> result;
   // Sacrifice brevity for readability and debuggability
- if (type == PromiseHookType::kInit) {
- if (!wrap->init_function().IsEmpty()) {
- Local<Value> initArgs[4] = {
- async_id, String::NewFromUtf8Literal(hooks->isolate_, "PROMISE"),
- promise
- ->GetPrivate(context, hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked(),
- promise};
- result = wrap->init_function()->Call(context, rcv, 4, initArgs);
- }
- } else if (type == PromiseHookType::kBefore) {
- if (!wrap->before_function().IsEmpty()) {
- result = wrap->before_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kAfter) {
- if (!wrap->after_function().IsEmpty()) {
- result = wrap->after_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kResolve) {
- if (!wrap->promiseResolve_function().IsEmpty()) {
- result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
- }
+ switch (type) {
+ case PromiseHookType::kInit:
+ if (!wrap->init_function().IsEmpty()) {
+ Local<Value> initArgs[4] = {
+ async_id, String::NewFromUtf8Literal(v8_isolate, "PROMISE"),
+ promise->GetPrivate(context, hooks->trigger_id_smb.Get(v8_isolate))
+ .ToLocalChecked(),
+ promise};
+ result = wrap->init_function()->Call(context, rcv, 4, initArgs);
+ }
+ break;
+ case PromiseHookType::kBefore:
+ if (!wrap->before_function().IsEmpty()) {
+ result = wrap->before_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kAfter:
+ if (!wrap->after_function().IsEmpty()) {
+ result = wrap->after_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kResolve:
+ if (!wrap->promiseResolve_function().IsEmpty()) {
+ result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
+ }
}
}
diff --git a/chromium/v8/src/d8/async-hooks-wrapper.h b/chromium/v8/src/d8/async-hooks-wrapper.h
index f339b6e3163..23cc0be9c06 100644
--- a/chromium/v8/src/d8/async-hooks-wrapper.h
+++ b/chromium/v8/src/d8/async-hooks-wrapper.h
@@ -7,11 +7,18 @@
#include <stack>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-promise.h"
#include "src/objects/objects.h"
namespace v8 {
+class Function;
+class Isolate;
+class ObjectTemplate;
+class Value;
+
using async_id_t = double;
struct AsyncContext {
diff --git a/chromium/v8/src/d8/d8-platforms.cc b/chromium/v8/src/d8/d8-platforms.cc
index 722b2bc4e2e..cd48a35bbde 100644
--- a/chromium/v8/src/d8/d8-platforms.cc
+++ b/chromium/v8/src/d8/d8-platforms.cc
@@ -82,7 +82,14 @@ class PredictablePlatform final : public Platform {
}
double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
+ // In predictable mode, there should be no (observable) concurrency, but we
+ // still run some tests that explicitly specify '--predictable' in the
+ // '--isolates' variant, where several threads run the same test in
+ // different isolates. To avoid TSan issues in that scenario we use atomic
+ // increments here.
+ uint64_t synthetic_time =
+ synthetic_time_.fetch_add(1, std::memory_order_relaxed);
+ return 1e-5 * synthetic_time;
}
double CurrentClockTimeMillis() override {
@@ -96,7 +103,7 @@ class PredictablePlatform final : public Platform {
Platform* platform() const { return platform_.get(); }
private:
- double synthetic_time_in_sec_ = 0.0;
+ std::atomic<uint64_t> synthetic_time_{0};
std::unique_ptr<Platform> platform_;
};
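
The change above replaces a plain double accumulator with an atomic counter. A minimal standalone sketch of the same idea (assumed names, not the d8 class itself): each call atomically bumps a tick counter and scales it to seconds, so concurrent callers never race and the observed times still increase deterministically.

// Standalone sketch of an atomic synthetic clock (not part of this patch).
#include <atomic>
#include <cassert>
#include <cstdint>

class SyntheticClock {
 public:
  double MonotonicallyIncreasingTime() {
    // fetch_add returns the previous value; relaxed ordering suffices because
    // only the counter itself needs to be consistent across threads.
    uint64_t ticks = synthetic_time_.fetch_add(1, std::memory_order_relaxed);
    return 1e-5 * ticks;
  }

 private:
  std::atomic<uint64_t> synthetic_time_{0};
};

int main() {
  SyntheticClock clock;
  double t0 = clock.MonotonicallyIncreasingTime();  // 0.0
  double t1 = clock.MonotonicallyIncreasingTime();  // 1e-5
  assert(t1 > t0);
  return 0;
}
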
diff --git a/chromium/v8/src/d8/d8-posix.cc b/chromium/v8/src/d8/d8-posix.cc
index 05e475f5387..8db4beff0f0 100644
--- a/chromium/v8/src/d8/d8-posix.cc
+++ b/chromium/v8/src/d8/d8-posix.cc
@@ -16,6 +16,8 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "include/v8-container.h"
+#include "include/v8-template.h"
#include "src/base/platform/wrappers.h"
#include "src/d8/d8.h"
@@ -163,10 +165,12 @@ class ExecArgs {
"os.system(): String conversion of program name failed");
return false;
}
- int len = prog.length() + 3;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *prog);
- exec_args_[0] = c_arg;
+ {
+ int len = prog.length() + 3;
+ char* c_arg = new char[len];
+ snprintf(c_arg, len, "%s", *prog);
+ exec_args_[0] = c_arg;
+ }
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
Local<Value> arg(
diff --git a/chromium/v8/src/d8/d8-test.cc b/chromium/v8/src/d8/d8-test.cc
index 635a1f45141..c474d3adb8c 100644
--- a/chromium/v8/src/d8/d8-test.cc
+++ b/chromium/v8/src/d8/d8-test.cc
@@ -5,6 +5,7 @@
#include "src/d8/d8.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
// This file exposes a d8.test.fast_c_api object, which adds testing facility
@@ -16,7 +17,8 @@
// and resetting these counters.
// Make sure to sync the following with src/compiler/globals.h.
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64) || \
+ (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
@@ -94,10 +96,8 @@ class FastCApiObject {
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
typedef double Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kFloat64);
#else
typedef int32_t Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kInt32);
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
static Type AddAllSequenceFastCallback(Local<Object> receiver,
bool should_fallback,
@@ -119,8 +119,9 @@ class FastCApiObject {
}
Type buffer[1024];
- bool result = TryCopyAndConvertArrayToCppBuffer<&type_info, Type>(
- seq_arg, buffer, 1024);
+ bool result = TryToCopyAndConvertArrayToCppBuffer<
+ i::CTypeInfoBuilder<Type>::Build().GetId(), Type>(seq_arg, buffer,
+ 1024);
if (!result) {
options.fallback = 1;
return 0;
@@ -630,16 +631,19 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect, &is_valid_api_object_c_func));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::FastCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::FastCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "slow_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::SlowCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::SlowCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "reset_counts",
FunctionTemplate::New(isolate, FastCApiObject::ResetCounts,
- Local<Value>(), signature));
+ Local<Value>(), signature, 1,
+ ConstructorBehavior::kThrow));
}
api_obj_ctor->InstanceTemplate()->SetInternalFieldCount(
FastCApiObject::kV8WrapperObjectIndex + 1);
diff --git a/chromium/v8/src/d8/d8.cc b/chromium/v8/src/d8/d8.cc
index 2b831bc7473..74553538212 100644
--- a/chromium/v8/src/d8/d8.cc
+++ b/chromium/v8/src/d8/d8.cc
@@ -24,8 +24,12 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
#include "include/v8-inspector.h"
+#include "include/v8-json.h"
#include "include/v8-profiler.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/cpu.h"
#include "src/base/logging.h"
@@ -48,7 +52,7 @@
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/logging/log-utils.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
@@ -166,7 +170,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
- v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
@@ -174,7 +178,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
- v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
@@ -236,7 +240,7 @@ class MockArrayBufferAllocatiorWithLimit : public MockArrayBufferAllocator {
std::atomic<size_t> space_left_;
};
-#ifdef V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
// This is a mock allocator variant that provides a huge virtual allocation
// backed by a small real allocation that is repeatedly mapped. If you create an
@@ -329,7 +333,7 @@ class MultiMappedAllocator : public ArrayBufferAllocatorBase {
base::Mutex regions_mutex_;
};
-#endif // V8_OS_LINUX
+#endif // MULTI_MAPPED_ALLOCATOR_AVAILABLE
v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
@@ -702,7 +706,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
ScriptOrigin origin(isolate, name);
for (int i = 1; i < options.repeat_compile; ++i) {
- HandleScope handle_scope(isolate);
+ HandleScope handle_scope_for_compiling(isolate);
if (CompileString<Script>(isolate, context, source, origin).IsEmpty()) {
return false;
}
@@ -846,16 +850,21 @@ std::string NormalizePath(const std::string& path,
std::string segment;
while (std::getline(segment_stream, segment, '/')) {
if (segment == "..") {
- segments.pop_back();
+ if (!segments.empty()) segments.pop_back();
} else if (segment != ".") {
segments.push_back(segment);
}
}
// Join path segments.
std::ostringstream os;
- std::copy(segments.begin(), segments.end() - 1,
- std::ostream_iterator<std::string>(os, "/"));
- os << *segments.rbegin();
+ if (segments.size() > 1) {
+ std::copy(segments.begin(), segments.end() - 1,
+ std::ostream_iterator<std::string>(os, "/"));
+ os << *segments.rbegin();
+ } else {
+ os << "/";
+ if (!segments.empty()) os << segments[0];
+ }
return os.str();
}
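
The NormalizePath fix above guards the empty-segments corner cases. Below is a self-contained sketch of the core segment logic, simplified (it ignores the leading-'/' handling of the real helper; names are made up).

// Standalone sketch: split on '/', drop ".", pop on ".." (guarding the empty
// case), then rejoin. Simplified relative to the d8 helper above.
#include <cassert>
#include <sstream>
#include <string>
#include <vector>

std::string NormalizeSegments(const std::string& path) {
  std::vector<std::string> segments;
  std::istringstream stream(path);
  std::string segment;
  while (std::getline(stream, segment, '/')) {
    if (segment == "..") {
      if (!segments.empty()) segments.pop_back();
    } else if (segment != ".") {
      segments.push_back(segment);
    }
  }
  std::string result;
  for (size_t i = 0; i < segments.size(); ++i) {
    if (i > 0) result += '/';
    result += segments[i];
  }
  return result;
}

int main() {
  assert(NormalizeSegments("a/b/../c") == "a/c");
  assert(NormalizeSegments("./a/./b") == "a/b");
  assert(NormalizeSegments("..") == "");  // No underflow on a lone "..".
  return 0;
}
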
@@ -1995,8 +2004,14 @@ void Shell::TestVerifySourcePositions(
auto callable = i::Handle<i::JSFunctionOrBoundFunction>::cast(arg_handle);
while (callable->IsJSBoundFunction()) {
+ internal::DisallowGarbageCollection no_gc;
auto bound_function = i::Handle<i::JSBoundFunction>::cast(callable);
auto bound_target = bound_function->bound_target_function();
+ if (!bound_target.IsJSFunctionOrBoundFunction()) {
+ internal::AllowGarbageCollection allow_gc;
+ isolate->ThrowError("Expected function as bound target.");
+ return;
+ }
callable =
handle(i::JSFunctionOrBoundFunction::cast(bound_target), i_isolate);
}
@@ -2009,7 +2024,7 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::BytecodeArray> bytecodes =
handle(function->shared().GetBytecodeArray(i_isolate), i_isolate);
i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes);
- bool has_baseline = function->shared().HasBaselineData();
+ bool has_baseline = function->shared().HasBaselineCode();
i::Handle<i::ByteArray> bytecode_offsets;
std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
if (has_baseline) {
@@ -2990,7 +3005,7 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
// Correctness fuzzing will attempt to compare results of tests with and
// without turbo_fast_api_calls, so we don't expose the fast_c_api
// constructor when --correctness_fuzzer_suppressions is on.
- if (i::FLAG_turbo_fast_api_calls &&
+ if (options.expose_fast_api && i::FLAG_turbo_fast_api_calls &&
!i::FLAG_correctness_fuzzer_suppressions) {
test_template->Set(isolate, "FastCAPI",
Shell::CreateTestFastCApiTemplate(isolate));
@@ -3166,13 +3181,15 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
Local<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
- Local<Object> dispatch_counters = reinterpret_cast<i::Isolate*>(isolate)
- ->interpreter()
- ->GetDispatchCountersObject();
+ i::Handle<i::JSObject> dispatch_counters =
+ reinterpret_cast<i::Isolate*>(isolate)
+ ->interpreter()
+ ->GetDispatchCountersObject();
std::ofstream dispatch_counters_stream(
i::FLAG_trace_ignition_dispatches_output_file);
dispatch_counters_stream << *String::Utf8Value(
- isolate, JSON::Stringify(context, dispatch_counters).ToLocalChecked());
+ isolate, JSON::Stringify(context, Utils::ToLocal(dispatch_counters))
+ .ToLocalChecked());
}
namespace {
@@ -3235,10 +3252,10 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
int end_line = end.GetLineNumber();
uint32_t count = function_data.Count();
- Local<String> name;
+ Local<String> function_name;
std::stringstream name_stream;
- if (function_data.Name().ToLocal(&name)) {
- name_stream << ToSTLString(isolate, name);
+ if (function_data.Name().ToLocal(&function_name)) {
+ name_stream << ToSTLString(isolate, function_name);
} else {
name_stream << "<" << start_line + 1 << "-";
name_stream << start.GetColumnNumber() << ">";
@@ -3258,8 +3275,8 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
}
}
// Write per-line coverage. LCOV uses 1-based line numbers.
- for (size_t i = 0; i < lines.size(); i++) {
- sink << "DA:" << (i + 1) << "," << lines[i] << std::endl;
+ for (size_t j = 0; j < lines.size(); j++) {
+ sink << "DA:" << (j + 1) << "," << lines[j] << std::endl;
}
sink << "end_of_record" << std::endl;
}
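
For reference, the loop above emits one LCOV "DA:<line>,<hits>" record per source line (1-based) followed by "end_of_record". A tiny standalone sketch with made-up counts:

// Standalone sketch of the per-line LCOV emission above; counts are made up.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint32_t> lines = {1, 1, 0, 3};  // Hit counts per source line.
  for (size_t j = 0; j < lines.size(); j++) {
    std::cout << "DA:" << (j + 1) << "," << lines[j] << std::endl;
  }
  std::cout << "end_of_record" << std::endl;
  return 0;
}
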
@@ -3491,15 +3508,9 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
isolate->ThrowError("Error reading file");
return;
}
- std::unique_ptr<v8::BackingStore> backing_store =
- ArrayBuffer::NewBackingStore(
- data, length,
- [](void* data, size_t length, void*) {
- delete[] reinterpret_cast<uint8_t*>(data);
- },
- nullptr);
- Local<v8::ArrayBuffer> buffer =
- ArrayBuffer::New(isolate, std::move(backing_store));
+ Local<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, length);
+ memcpy(buffer->GetBackingStore()->Data(), data, length);
+ delete[] data;
args.GetReturnValue().Set(buffer);
}
@@ -4252,6 +4263,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--no-fail") == 0) {
+ options.no_fail = true;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = nullptr;
@@ -4357,8 +4371,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.fuzzilli_coverage_statistics = true;
argv[i] = nullptr;
#endif
- } else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
- options.fuzzy_module_file_extensions = true;
+ } else if (strcmp(argv[i], "--no-fuzzy-module-file-extensions") == 0) {
+ DCHECK(options.fuzzy_module_file_extensions);
+ options.fuzzy_module_file_extensions = false;
argv[i] = nullptr;
#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
} else if (strcmp(argv[i], "--enable-system-instrumentation") == 0) {
@@ -4381,6 +4396,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.wasm_trap_handler = false;
argv[i] = nullptr;
#endif // V8_ENABLE_WEBASSEMBLY
+ } else if (strcmp(argv[i], "--expose-fast-api") == 0) {
+ options.expose_fast_api = true;
+ argv[i] = nullptr;
}
}
@@ -4404,10 +4422,15 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.mock_arraybuffer_allocator = i::FLAG_mock_arraybuffer_allocator;
options.mock_arraybuffer_allocator_limit =
i::FLAG_mock_arraybuffer_allocator_limit;
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
options.multi_mapped_mock_allocator = i::FLAG_multi_mapped_mock_allocator;
#endif
+ if (i::FLAG_stress_snapshot && options.expose_fast_api &&
+ check_d8_flag_contradictions) {
+ FATAL("Flag --expose-fast-api is incompatible with --stress-snapshot.");
+ }
+
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
@@ -4501,7 +4524,8 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
Shell::unhandled_promise_rejections_.store(0);
}
// In order to finish successfully, success must be != expected_to_throw.
- return success == Shell::options.expected_to_throw ? 1 : 0;
+ if (Shell::options.no_fail) return 0;
+ return (success == Shell::options.expected_to_throw ? 1 : 0);
}
void Shell::CollectGarbage(Isolate* isolate) {
@@ -5019,7 +5043,7 @@ int Shell::Main(int argc, char* argv[]) {
options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
in_process_stack_dumping, std::move(tracing));
g_default_platform = g_platform.get();
- if (i::FLAG_verify_predictable) {
+ if (i::FLAG_predictable) {
g_platform = MakePredictablePlatform(std::move(g_platform));
}
if (options.stress_delay_tasks) {
@@ -5037,6 +5061,11 @@ int Shell::Main(int argc, char* argv[]) {
V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
if (options.snapshot_blob) {
v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob);
@@ -5053,19 +5082,19 @@ int Shell::Main(int argc, char* argv[]) {
memory_limit >= options.mock_arraybuffer_allocator_limit
? memory_limit
: std::numeric_limits<size_t>::max());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
MultiMappedAllocator multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
if (options.mock_arraybuffer_allocator) {
if (memory_limit) {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator_with_limit;
} else {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
}
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
} else if (options.multi_mapped_mock_allocator) {
Shell::array_buffer_allocator = &multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
} else {
Shell::array_buffer_allocator = &shell_array_buffer_allocator;
}
@@ -5165,15 +5194,15 @@ int Shell::Main(int argc, char* argv[]) {
ShellOptions::CodeCacheOptions::kNoProduceCache) {
printf("============ Run: Produce code cache ============\n");
// First run to produce the cache
- Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ Isolate::CreateParams create_params2;
+ create_params2.array_buffer_allocator = Shell::array_buffer_allocator;
i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* isolate2 = Isolate::New(create_params);
+ Isolate* isolate2 = Isolate::New(create_params2);
i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
{
- D8Console console(isolate2);
- Initialize(isolate2, &console);
- PerIsolateData data(isolate2);
+ D8Console console2(isolate2);
+ Initialize(isolate2, &console2);
+ PerIsolateData data2(isolate2);
Isolate::Scope isolate_scope(isolate2);
result = RunMain(isolate2, false);
diff --git a/chromium/v8/src/d8/d8.h b/chromium/v8/src/d8/d8.h
index 9d3cc4f6d2e..77b3ca6679b 100644
--- a/chromium/v8/src/d8/d8.h
+++ b/chromium/v8/src/d8/d8.h
@@ -14,6 +14,9 @@
#include <unordered_set>
#include <vector>
+#include "include/v8-array-buffer.h"
+#include "include/v8-isolate.h"
+#include "include/v8-script.h"
#include "src/base/once.h"
#include "src/base/platform/time.h"
#include "src/base/platform/wrappers.h"
@@ -24,7 +27,11 @@
namespace v8 {
+class BackingStore;
+class CompiledWasmModule;
class D8Console;
+class Message;
+class TryCatch;
enum class ModuleType { kJavaScript, kJSON, kInvalid };
@@ -385,14 +392,17 @@ class ShellOptions {
DisallowReassignment<bool> interactive_shell = {"shell", false};
bool test_shell = false;
DisallowReassignment<bool> expected_to_throw = {"throws", false};
+ DisallowReassignment<bool> no_fail = {"no-fail", false};
DisallowReassignment<bool> ignore_unhandled_promises = {
"ignore-unhandled-promises", false};
DisallowReassignment<bool> mock_arraybuffer_allocator = {
"mock-arraybuffer-allocator", false};
DisallowReassignment<size_t> mock_arraybuffer_allocator_limit = {
"mock-arraybuffer-allocator-limit", 0};
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DisallowReassignment<bool> multi_mapped_mock_allocator = {
"multi-mapped-mock-allocator", false};
+#endif
DisallowReassignment<bool> enable_inspector = {"enable-inspector", false};
int num_isolates = 1;
DisallowReassignment<v8::ScriptCompiler::CompileOptions, true>
@@ -433,6 +443,7 @@ class ShellOptions {
#if V8_ENABLE_WEBASSEMBLY
DisallowReassignment<bool> wasm_trap_handler = {"wasm-trap-handler", true};
#endif // V8_ENABLE_WEBASSEMBLY
+ DisallowReassignment<bool> expose_fast_api = {"expose-fast-api", false};
};
class Shell : public i::AllStatic {
diff --git a/chromium/v8/src/date/date.cc b/chromium/v8/src/date/date.cc
index 250539e24cb..9b0665aba07 100644
--- a/chromium/v8/src/date/date.cc
+++ b/chromium/v8/src/date/date.cc
@@ -455,5 +455,83 @@ DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
return result;
}
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+const double kMsPerDay = 86400000.0;
+
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+} // namespace
+
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
+    //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
+ return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
} // namespace internal
} // namespace v8
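
A quick standalone check of the day-from-epoch arithmetic in MakeDay above (the asserted values are well-known offsets from Jan 1 1970, not taken from this patch). kYearDelta shifts years positive so the integer divisions stay well-defined; the month tables add the day offset within the year.

// Standalone sketch of MakeDay's day_from_year computation; month is 0-based.
#include <cassert>

int DaysFromYearMonth(int year, int month) {
  static const int kYearDelta = 399999;
  static const int kBaseDay =
      365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
      (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
  int days = 365 * (year + kYearDelta) + (year + kYearDelta) / 4 -
             (year + kYearDelta) / 100 + (year + kYearDelta) / 400 - kBaseDay;
  static const int kDayFromMonthCommon[] = {0,   31,  59,  90,  120, 151,
                                            181, 212, 243, 273, 304, 334};
  static const int kDayFromMonthLeap[] = {0,   31,  60,  91,  121, 152,
                                          182, 213, 244, 274, 305, 335};
  bool leap = (year % 4 == 0) && (year % 100 != 0 || year % 400 == 0);
  return days + (leap ? kDayFromMonthLeap[month] : kDayFromMonthCommon[month]);
}

int main() {
  // Day numbers for the first of a month, relative to Jan 1 1970 (day 0).
  assert(DaysFromYearMonth(1970, 0) == 0);
  assert(DaysFromYearMonth(1970, 1) == 31);     // Feb 1 1970.
  assert(DaysFromYearMonth(2000, 0) == 10957);  // Jan 1 2000.
  return 0;
}
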
diff --git a/chromium/v8/src/date/date.h b/chromium/v8/src/date/date.h
index 1f6c79c5d48..734ab3a26f0 100644
--- a/chromium/v8/src/date/date.h
+++ b/chromium/v8/src/date/date.h
@@ -236,6 +236,17 @@ class V8_EXPORT_PRIVATE DateCache {
base::TimezoneCache* tz_cache_;
};
+// Routines shared between Date and Temporal
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time);
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date);
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms);
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/debug/debug-evaluate.cc b/chromium/v8/src/debug/debug-evaluate.cc
index cecf46d7b7e..915ed7833fe 100644
--- a/chromium/v8/src/debug/debug-evaluate.cc
+++ b/chromium/v8/src/debug/debug-evaluate.cc
@@ -34,9 +34,8 @@ static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
ScriptOriginOptions(false, true));
script_details.repl_mode = repl_mode;
return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE);
+ isolate, source, script_details, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
}
} // namespace
@@ -289,9 +288,8 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
}
}
-namespace {
-
-bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
+// static
+bool DebugEvaluate::IsSideEffectFreeIntrinsic(Runtime::FunctionId id) {
// Use macro to include only the non-inlined version of an intrinsic.
#define INTRINSIC_ALLOWLIST(V) \
/* Conversions */ \
@@ -386,18 +384,15 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(StringMaxLength) \
V(StringToArray) \
V(AsyncFunctionEnter) \
- V(AsyncFunctionReject) \
V(AsyncFunctionResolve) \
/* Test */ \
V(GetOptimizationStatus) \
V(OptimizeFunctionOnNextCall) \
- V(OptimizeOsr) \
- V(UnblockConcurrentRecompilation)
+ V(OptimizeOsr)
// Intrinsics with inline versions have to be allowlisted here a second time.
#define INLINE_INTRINSIC_ALLOWLIST(V) \
V(AsyncFunctionEnter) \
- V(AsyncFunctionReject) \
V(AsyncFunctionResolve)
#define CASE(Name) case Runtime::k##Name:
@@ -420,6 +415,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef INLINE_INTRINSIC_ALLOWLIST
}
+namespace {
+
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
using interpreter::Bytecode;
using interpreter::Bytecodes;
@@ -755,6 +752,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kStringFromCharCode:
case Builtin::kStringFromCodePoint:
case Builtin::kStringConstructor:
+ case Builtin::kStringListFromIterable:
case Builtin::kStringPrototypeAnchor:
case Builtin::kStringPrototypeAt:
case Builtin::kStringPrototypeBig:
@@ -833,6 +831,78 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kAllocateRegularInOldGeneration:
return DebugInfo::kHasNoSideEffect;
+#ifdef V8_INTL_SUPPORT
+ // Intl builtins.
+ case Builtin::kIntlGetCanonicalLocales:
+ // Intl.Collator builtins.
+ case Builtin::kCollatorConstructor:
+ case Builtin::kCollatorInternalCompare:
+ case Builtin::kCollatorPrototypeCompare:
+ case Builtin::kCollatorPrototypeResolvedOptions:
+ case Builtin::kCollatorSupportedLocalesOf:
+ // Intl.DateTimeFormat builtins.
+ case Builtin::kDateTimeFormatConstructor:
+ case Builtin::kDateTimeFormatInternalFormat:
+ case Builtin::kDateTimeFormatPrototypeFormat:
+ case Builtin::kDateTimeFormatPrototypeFormatRange:
+ case Builtin::kDateTimeFormatPrototypeFormatRangeToParts:
+ case Builtin::kDateTimeFormatPrototypeFormatToParts:
+ case Builtin::kDateTimeFormatPrototypeResolvedOptions:
+ case Builtin::kDateTimeFormatSupportedLocalesOf:
+ // Intl.DisplayNames builtins.
+ case Builtin::kDisplayNamesConstructor:
+ case Builtin::kDisplayNamesPrototypeOf:
+ case Builtin::kDisplayNamesPrototypeResolvedOptions:
+ case Builtin::kDisplayNamesSupportedLocalesOf:
+ // Intl.ListFormat builtins.
+ case Builtin::kListFormatConstructor:
+ case Builtin::kListFormatPrototypeFormat:
+ case Builtin::kListFormatPrototypeFormatToParts:
+ case Builtin::kListFormatPrototypeResolvedOptions:
+ case Builtin::kListFormatSupportedLocalesOf:
+ // Intl.Locale builtins.
+ case Builtin::kLocaleConstructor:
+ case Builtin::kLocalePrototypeBaseName:
+ case Builtin::kLocalePrototypeCalendar:
+ case Builtin::kLocalePrototypeCalendars:
+ case Builtin::kLocalePrototypeCaseFirst:
+ case Builtin::kLocalePrototypeCollation:
+ case Builtin::kLocalePrototypeCollations:
+ case Builtin::kLocalePrototypeHourCycle:
+ case Builtin::kLocalePrototypeHourCycles:
+ case Builtin::kLocalePrototypeLanguage:
+ case Builtin::kLocalePrototypeMaximize:
+ case Builtin::kLocalePrototypeMinimize:
+ case Builtin::kLocalePrototypeNumeric:
+ case Builtin::kLocalePrototypeNumberingSystem:
+ case Builtin::kLocalePrototypeNumberingSystems:
+ case Builtin::kLocalePrototypeRegion:
+ case Builtin::kLocalePrototypeScript:
+ case Builtin::kLocalePrototypeTextInfo:
+ case Builtin::kLocalePrototypeTimeZones:
+ case Builtin::kLocalePrototypeToString:
+ case Builtin::kLocalePrototypeWeekInfo:
+ // Intl.NumberFormat builtins.
+ case Builtin::kNumberFormatConstructor:
+ case Builtin::kNumberFormatInternalFormatNumber:
+ case Builtin::kNumberFormatPrototypeFormatNumber:
+ case Builtin::kNumberFormatPrototypeFormatToParts:
+ case Builtin::kNumberFormatPrototypeResolvedOptions:
+ case Builtin::kNumberFormatSupportedLocalesOf:
+ // Intl.PluralRules builtins.
+ case Builtin::kPluralRulesConstructor:
+ case Builtin::kPluralRulesPrototypeResolvedOptions:
+ case Builtin::kPluralRulesPrototypeSelect:
+ case Builtin::kPluralRulesSupportedLocalesOf:
+ // Intl.RelativeTimeFormat builtins.
+ case Builtin::kRelativeTimeFormatConstructor:
+ case Builtin::kRelativeTimeFormatPrototypeFormat:
+ case Builtin::kRelativeTimeFormatPrototypeFormatToParts:
+ case Builtin::kRelativeTimeFormatPrototypeResolvedOptions:
+ case Builtin::kRelativeTimeFormatSupportedLocalesOf:
+ return DebugInfo::kHasNoSideEffect;
+#endif // V8_INTL_SUPPORT
+
// Set builtins.
case Builtin::kSetIteratorPrototypeNext:
case Builtin::kSetPrototypeAdd:
@@ -884,6 +954,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kRegExpPrototypeUnicodeGetter:
case Builtin::kRegExpPrototypeStickyGetter:
return DebugInfo::kRequiresRuntimeChecks;
+
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] built-in %s may cause side effect.\n",
@@ -904,7 +975,7 @@ bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
case Bytecode::kStaCurrentContextSlot:
return true;
default:
- return false;
+ return interpreter::Bytecodes::IsCallRuntime(bytecode);
}
}
@@ -931,16 +1002,6 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
it.Advance()) {
interpreter::Bytecode bytecode = it.current_bytecode();
-
- if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
- Runtime::FunctionId id =
- (bytecode == interpreter::Bytecode::kInvokeIntrinsic)
- ? it.GetIntrinsicIdOperand(0)
- : it.GetRuntimeIdOperand(0);
- if (IntrinsicHasNoSideEffect(id)) continue;
- return DebugInfo::kHasSideEffects;
- }
-
if (BytecodeHasNoSideEffect(bytecode)) continue;
if (BytecodeRequiresRuntimeCheck(bytecode)) {
requires_runtime_checks = true;
@@ -981,7 +1042,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
switch (callee) {
// Transitively called Builtins:
case Builtin::kAbort:
- case Builtin::kAbortCSAAssert:
+ case Builtin::kAbortCSADcheck:
case Builtin::kAdaptorWithBuiltinExitFrame:
case Builtin::kArrayConstructorImpl:
case Builtin::kArrayEveryLoopContinuation:
@@ -1061,6 +1122,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
+ case Builtin::kTSANSeqCstStore8IgnoreFP:
+ case Builtin::kTSANSeqCstStore8SaveFP:
+ case Builtin::kTSANSeqCstStore16IgnoreFP:
+ case Builtin::kTSANSeqCstStore16SaveFP:
+ case Builtin::kTSANSeqCstStore32IgnoreFP:
+ case Builtin::kTSANSeqCstStore32SaveFP:
+ case Builtin::kTSANSeqCstStore64IgnoreFP:
+ case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
diff --git a/chromium/v8/src/debug/debug-evaluate.h b/chromium/v8/src/debug/debug-evaluate.h
index 34a6c8d4c75..1a9be54893a 100644
--- a/chromium/v8/src/debug/debug-evaluate.h
+++ b/chromium/v8/src/debug/debug-evaluate.h
@@ -53,6 +53,7 @@ class DebugEvaluate : public AllStatic {
static DebugInfo::SideEffectState FunctionGetSideEffectState(
Isolate* isolate, Handle<SharedFunctionInfo> info);
static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
+ static bool IsSideEffectFreeIntrinsic(Runtime::FunctionId id);
#ifdef DEBUG
static void VerifyTransitiveBuiltins(Isolate* isolate);
diff --git a/chromium/v8/src/debug/debug-interface.cc b/chromium/v8/src/debug/debug-interface.cc
index 5112c5ba73f..e6ae32f9d2c 100644
--- a/chromium/v8/src/debug/debug-interface.cc
+++ b/chromium/v8/src/debug/debug-interface.cc
@@ -4,6 +4,7 @@
#include "src/debug/debug-interface.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/script-details.h"
@@ -16,7 +17,6 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/regexp/regexp-stack.h"
#include "src/strings/string-builder-inl.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -303,10 +303,7 @@ void SetTerminateOnResume(Isolate* v8_isolate) {
bool CanBreakProgram(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
- // We cannot break a program if we are currently running a regexp.
- // TODO(yangguo): fix this exception.
- return !isolate->regexp_stack()->is_in_use() &&
- isolate->debug()->AllFramesOnStackAreBlackboxed();
+ return isolate->debug()->AllFramesOnStackAreBlackboxed();
}
Isolate* Script::GetIsolate() const {
@@ -760,8 +757,8 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
{
i::AlignedCachedData* cached_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, i::ScriptDetails(), nullptr, cached_data,
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, i::ScriptDetails(), cached_data,
ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
@@ -862,7 +859,7 @@ Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin requested_builtin) {
.set_map(isolate->strict_function_without_prototype_map())
.Build();
- fun->shared().set_internal_formal_parameter_count(0);
+ fun->shared().set_internal_formal_parameter_count(i::JSParameterCount(0));
fun->shared().set_length(0);
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -1034,16 +1031,6 @@ int64_t GetNextRandomInt64(v8::Isolate* v8_isolate) {
->NextInt64();
}
-void EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
- RuntimeCallCounterCallback callback) {
-#ifdef V8_RUNTIME_CALL_STATS
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- if (isolate->counters()) {
- isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
- }
-#endif // V8_RUNTIME_CALL_STATS
-}
-
int GetDebuggingId(v8::Local<v8::Function> function) {
i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
@@ -1186,61 +1173,43 @@ TypeProfile::ScriptData TypeProfile::GetScriptData(size_t i) const {
return ScriptData(i, type_profile_);
}
-v8::MaybeLocal<v8::Value> WeakMap::Get(v8::Local<v8::Context> context,
- v8::Local<v8::Value> key) {
- PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value);
- auto self = Utils::OpenHandle(this);
- Local<Value> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !ToLocal<Value>(i::Execution::CallBuiltin(isolate, isolate->weakmap_get(),
- self, arraysize(argv), argv),
- &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
+MaybeLocal<v8::Value> EphemeronTable::Get(v8::Isolate* isolate,
+ v8::Local<v8::Value> key) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ auto self = i::Handle<i::EphemeronHashTable>::cast(Utils::OpenHandle(this));
+ i::Handle<i::Object> internal_key = Utils::OpenHandle(*key);
+ DCHECK(internal_key->IsJSReceiver());
+
+ i::Handle<i::Object> value(self->Lookup(internal_key), internal_isolate);
+
+ if (value->IsTheHole()) return {};
+ return Utils::ToLocal(value);
}
-v8::Maybe<bool> WeakMap::Delete(v8::Local<v8::Context> context,
- v8::Local<v8::Value> key) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, WeakMap, Delete, Nothing<bool>(),
- InternalEscapableScope, false);
- auto self = Utils::OpenHandle(this);
- Local<Value> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !ToLocal<Value>(
- i::Execution::CallBuiltin(isolate, isolate->weakmap_delete(), self,
- arraysize(argv), argv),
- &result);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->IsTrue());
-}
-
-v8::MaybeLocal<WeakMap> WeakMap::Set(v8::Local<v8::Context> context,
- v8::Local<v8::Value> key,
- v8::Local<v8::Value> value) {
- PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap);
- auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
- Utils::OpenHandle(*value)};
- has_pending_exception =
- !i::Execution::CallBuiltin(isolate, isolate->weakmap_set(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(WeakMap);
- RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
-}
-
-Local<WeakMap> WeakMap::New(v8::Isolate* isolate) {
+Local<EphemeronTable> EphemeronTable::Set(v8::Isolate* isolate,
+ v8::Local<v8::Value> key,
+ v8::Local<v8::Value> value) {
+ auto self = i::Handle<i::EphemeronHashTable>::cast(Utils::OpenHandle(this));
+ i::Handle<i::Object> internal_key = Utils::OpenHandle(*key);
+ i::Handle<i::Object> internal_value = Utils::OpenHandle(*value);
+ DCHECK(internal_key->IsJSReceiver());
+
+ i::Handle<i::EphemeronHashTable> result(
+ i::EphemeronHashTable::Put(self, internal_key, internal_value));
+
+ return ToApiHandle<EphemeronTable>(result);
+}
+
+Local<EphemeronTable> EphemeronTable::New(v8::Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, WeakMap, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSWeakMap> obj = i_isolate->factory()->NewJSWeakMap();
- return ToApiHandle<WeakMap>(obj);
+ i::Handle<i::EphemeronHashTable> table =
+ i::EphemeronHashTable::New(i_isolate, 0);
+ return ToApiHandle<EphemeronTable>(table);
}
-WeakMap* WeakMap::Cast(v8::Value* value) {
- return static_cast<WeakMap*>(value);
+EphemeronTable* EphemeronTable::Cast(v8::Value* value) {
+ return static_cast<EphemeronTable*>(value);
}
Local<Value> AccessorPair::getter() {
@@ -1276,7 +1245,7 @@ MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
}
std::unique_ptr<PropertyIterator> PropertyIterator::Create(
- Local<Context> context, Local<Object> object) {
+ Local<Context> context, Local<Object> object, bool skip_indices) {
internal::Isolate* isolate =
reinterpret_cast<i::Isolate*>(object->GetIsolate());
if (IsExecutionTerminatingCheck(isolate)) {
@@ -1284,8 +1253,8 @@ std::unique_ptr<PropertyIterator> PropertyIterator::Create(
}
CallDepthScope<false> call_depth_scope(isolate, context);
- auto result =
- i::DebugPropertyIterator::Create(isolate, Utils::OpenHandle(*object));
+ auto result = i::DebugPropertyIterator::Create(
+ isolate, Utils::OpenHandle(*object), skip_indices);
if (!result) {
DCHECK(isolate->has_pending_exception());
call_depth_scope.Escape();
diff --git a/chromium/v8/src/debug/debug-interface.h b/chromium/v8/src/debug/debug-interface.h
index 81d38011cba..8c0ddb46cbb 100644
--- a/chromium/v8/src/debug/debug-interface.h
+++ b/chromium/v8/src/debug/debug-interface.h
@@ -7,9 +7,14 @@
#include <memory>
+#include "include/v8-callbacks.h"
+#include "include/v8-debug.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-memory-span.h"
+#include "include/v8-promise.h"
+#include "include/v8-script.h"
#include "include/v8-util.h"
-#include "include/v8.h"
-#include "src/base/platform/time.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
@@ -20,6 +25,8 @@ class V8Inspector;
namespace v8 {
+class Platform;
+
namespace internal {
struct CoverageBlock;
struct CoverageFunction;
@@ -515,11 +522,6 @@ enum class NativeAccessorType {
int64_t GetNextRandomInt64(v8::Isolate* isolate);
-using RuntimeCallCounterCallback =
- std::function<void(const char* name, int64_t count, base::TimeDelta time)>;
-void EnumerateRuntimeCallCounters(v8::Isolate* isolate,
- RuntimeCallCounterCallback callback);
-
MaybeLocal<Value> CallFunctionOn(Local<Context> context,
Local<Function> function, Local<Value> recv,
int argc, Local<Value> argv[],
@@ -568,19 +570,17 @@ class V8_NODISCARD DisableBreakScope {
std::unique_ptr<i::DisableBreak> scope_;
};
-class WeakMap : public v8::Object {
+class EphemeronTable : public v8::Object {
public:
- WeakMap() = delete;
+ EphemeronTable() = delete;
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<v8::Value> Get(
- v8::Local<v8::Context> context, v8::Local<v8::Value> key);
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::Maybe<bool> Delete(
- v8::Local<v8::Context> context, v8::Local<v8::Value> key);
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<WeakMap> Set(
- v8::Local<v8::Context> context, v8::Local<v8::Value> key,
+ v8::Isolate* isolate, v8::Local<v8::Value> key);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::Local<EphemeronTable> Set(
+ v8::Isolate* isolate, v8::Local<v8::Value> key,
v8::Local<v8::Value> value);
- V8_EXPORT_PRIVATE static Local<WeakMap> New(v8::Isolate* isolate);
- V8_INLINE static WeakMap* Cast(Value* obj);
+ V8_EXPORT_PRIVATE static Local<EphemeronTable> New(v8::Isolate* isolate);
+ V8_INLINE static EphemeronTable* Cast(Value* obj);
};
/**
@@ -619,7 +619,8 @@ class V8_EXPORT_PRIVATE PropertyIterator {
// Creating a PropertyIterator can potentially throw an exception.
// The returned std::unique_ptr is empty iff that happens.
V8_WARN_UNUSED_RESULT static std::unique_ptr<PropertyIterator> Create(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object);
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ bool skip_indices = false);
virtual ~PropertyIterator() = default;
diff --git a/chromium/v8/src/debug/debug-property-iterator.cc b/chromium/v8/src/debug/debug-property-iterator.cc
index 5d7ecda979b..b0bca65e30b 100644
--- a/chromium/v8/src/debug/debug-property-iterator.cc
+++ b/chromium/v8/src/debug/debug-property-iterator.cc
@@ -15,15 +15,14 @@ namespace v8 {
namespace internal {
std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
- Isolate* isolate, Handle<JSReceiver> receiver) {
+ Isolate* isolate, Handle<JSReceiver> receiver, bool skip_indices) {
// Can't use std::make_unique as Ctor is private.
auto iterator = std::unique_ptr<DebugPropertyIterator>(
- new DebugPropertyIterator(isolate, receiver));
+ new DebugPropertyIterator(isolate, receiver, skip_indices));
if (receiver->IsJSProxy()) {
iterator->AdvanceToPrototype();
}
- if (iterator->Done()) return iterator;
if (!iterator->FillKeysForCurrentPrototypeAndStage()) return nullptr;
if (iterator->should_move_to_next_stage() && !iterator->AdvanceInternal()) {
@@ -34,10 +33,15 @@ std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
}
DebugPropertyIterator::DebugPropertyIterator(Isolate* isolate,
- Handle<JSReceiver> receiver)
+ Handle<JSReceiver> receiver,
+ bool skip_indices)
: isolate_(isolate),
prototype_iterator_(isolate, receiver, kStartAtReceiver,
- PrototypeIterator::END_AT_NULL) {}
+ PrototypeIterator::END_AT_NULL),
+ skip_indices_(skip_indices),
+ current_key_index_(0),
+ current_keys_(isolate_->factory()->empty_fixed_array()),
+ current_keys_length_(0) {}
bool DebugPropertyIterator::Done() const { return is_done_; }
@@ -54,13 +58,13 @@ bool DebugPropertyIterator::AdvanceInternal() {
calculated_native_accessor_flags_ = false;
while (should_move_to_next_stage()) {
switch (stage_) {
- case Stage::kExoticIndices:
- stage_ = Stage::kEnumerableStrings;
+ case kExoticIndices:
+ stage_ = kEnumerableStrings;
break;
- case Stage::kEnumerableStrings:
- stage_ = Stage::kAllProperties;
+ case kEnumerableStrings:
+ stage_ = kAllProperties;
break;
- case Stage::kAllProperties:
+ case kAllProperties:
AdvanceToPrototype();
break;
}
@@ -70,20 +74,17 @@ bool DebugPropertyIterator::AdvanceInternal() {
}
bool DebugPropertyIterator::is_native_accessor() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_;
}
bool DebugPropertyIterator::has_native_getter() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_ &
static_cast<int>(debug::NativeAccessorType::HasGetter);
}
bool DebugPropertyIterator::has_native_setter() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_ &
static_cast<int>(debug::NativeAccessorType::HasSetter);
@@ -95,7 +96,7 @@ Handle<Name> DebugPropertyIterator::raw_name() const {
return isolate_->factory()->SizeToString(current_key_index_);
} else {
return Handle<Name>::cast(FixedArray::get(
- *keys_, static_cast<int>(current_key_index_), isolate_));
+ *current_keys_, static_cast<int>(current_key_index_), isolate_));
}
}
@@ -140,42 +141,38 @@ bool DebugPropertyIterator::is_own() { return is_own_; }
bool DebugPropertyIterator::is_array_index() {
if (stage_ == kExoticIndices) return true;
- uint32_t index = 0;
- return raw_name()->AsArrayIndex(&index);
+ PropertyKey key(isolate_, raw_name());
+ return key.is_element();
}
bool DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
current_key_index_ = 0;
- exotic_length_ = 0;
- keys_ = Handle<FixedArray>::null();
+ current_keys_ = isolate_->factory()->empty_fixed_array();
+ current_keys_length_ = 0;
if (is_done_) return true;
Handle<JSReceiver> receiver =
PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
- bool has_exotic_indices = receiver->IsJSTypedArray();
if (stage_ == kExoticIndices) {
- if (!has_exotic_indices) return true;
+ if (skip_indices_ || !receiver->IsJSTypedArray()) return true;
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
- exotic_length_ = typed_array->WasDetached() ? 0 : typed_array->length();
+ current_keys_length_ =
+ typed_array->WasDetached() ? 0 : typed_array->length();
return true;
}
- bool skip_indices = has_exotic_indices;
PropertyFilter filter =
stage_ == kEnumerableStrings ? ENUMERABLE_STRINGS : ALL_PROPERTIES;
- if (!KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
- GetKeysConversion::kConvertToString, false,
- skip_indices)
- .ToHandle(&keys_)) {
- keys_ = Handle<FixedArray>::null();
- return false;
+ if (KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+ GetKeysConversion::kConvertToString, false,
+ skip_indices_ || receiver->IsJSTypedArray())
+ .ToHandle(&current_keys_)) {
+ current_keys_length_ = current_keys_->length();
+ return true;
}
- return true;
+ return false;
}
bool DebugPropertyIterator::should_move_to_next_stage() const {
- if (is_done_) return false;
- if (stage_ == kExoticIndices) return current_key_index_ >= exotic_length_;
- return keys_.is_null() ||
- current_key_index_ >= static_cast<size_t>(keys_->length());
+ return !is_done_ && current_key_index_ >= current_keys_length_;
}
namespace {
@@ -210,10 +207,14 @@ base::Flags<debug::NativeAccessorType, int> GetNativeAccessorDescriptorInternal(
void DebugPropertyIterator::CalculateNativeAccessorFlags() {
if (calculated_native_accessor_flags_) return;
- Handle<JSReceiver> receiver =
- PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
- native_accessor_flags_ =
- GetNativeAccessorDescriptorInternal(receiver, raw_name());
+ if (stage_ == kExoticIndices) {
+ native_accessor_flags_ = 0;
+ } else {
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+ native_accessor_flags_ =
+ GetNativeAccessorDescriptorInternal(receiver, raw_name());
+ }
calculated_native_accessor_flags_ = true;
}
} // namespace internal
diff --git a/chromium/v8/src/debug/debug-property-iterator.h b/chromium/v8/src/debug/debug-property-iterator.h
index 38c78b12bda..b28fe78ac88 100644
--- a/chromium/v8/src/debug/debug-property-iterator.h
+++ b/chromium/v8/src/debug/debug-property-iterator.h
@@ -5,14 +5,18 @@
#ifndef V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
#define V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
+#include "include/v8-object.h"
#include "src/debug/debug-interface.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/prototype.h"
-#include "include/v8.h"
-
namespace v8 {
+
+class Name;
+
namespace internal {
class JSReceiver;
@@ -20,7 +24,7 @@ class JSReceiver;
class DebugPropertyIterator final : public debug::PropertyIterator {
public:
V8_WARN_UNUSED_RESULT static std::unique_ptr<DebugPropertyIterator> Create(
- Isolate* isolate, Handle<JSReceiver> receiver);
+ Isolate* isolate, Handle<JSReceiver> receiver, bool skip_indices);
~DebugPropertyIterator() override = default;
DebugPropertyIterator(const DebugPropertyIterator&) = delete;
DebugPropertyIterator& operator=(const DebugPropertyIterator&) = delete;
@@ -39,7 +43,8 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
bool is_array_index() override;
private:
- DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver);
+ DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+ bool skip_indices);
V8_WARN_UNUSED_RESULT bool FillKeysForCurrentPrototypeAndStage();
bool should_move_to_next_stage() const;
@@ -50,12 +55,16 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
Isolate* isolate_;
PrototypeIterator prototype_iterator_;
- enum Stage { kExoticIndices = 0, kEnumerableStrings = 1, kAllProperties = 2 };
- Stage stage_ = kExoticIndices;
+ enum {
+ kExoticIndices = 0,
+ kEnumerableStrings = 1,
+ kAllProperties = 2
+ } stage_ = kExoticIndices;
+ bool skip_indices_;
- size_t current_key_index_ = 0;
- Handle<FixedArray> keys_;
- size_t exotic_length_ = 0;
+ size_t current_key_index_;
+ Handle<FixedArray> current_keys_;
+ size_t current_keys_length_;
bool calculated_native_accessor_flags_ = false;
int native_accessor_flags_ = 0;
diff --git a/chromium/v8/src/debug/debug.cc b/chromium/v8/src/debug/debug.cc
index 41775c89656..031910b4dcc 100644
--- a/chromium/v8/src/debug/debug.cc
+++ b/chromium/v8/src/debug/debug.cc
@@ -24,7 +24,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/v8threads.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -1325,7 +1325,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- DCHECK(shared.HasBaselineData());
+ DCHECK(shared.HasBaselineCode());
Isolate* isolate = shared.GetIsolate();
DiscardBaselineCodeVisitor visitor(shared);
visitor.VisitThread(isolate, isolate->thread_local_top());
@@ -1333,7 +1333,7 @@ void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
// TODO(v8:11429): Avoid this heap walk somehow.
HeapObjectIterator iterator(isolate->heap());
auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
- shared.flush_baseline_data();
+ shared.FlushBaselineCode();
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
@@ -1356,9 +1356,14 @@ void Debug::DiscardAllBaselineCode() {
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(obj);
- if (fun.shared().HasBaselineData()) {
+ if (fun.ActiveTierIsBaseline()) {
fun.set_code(*trampoline);
}
+ } else if (obj.IsSharedFunctionInfo()) {
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
+ if (shared.HasBaselineCode()) {
+ shared.FlushBaselineCode();
+ }
}
}
}
@@ -1369,7 +1374,7 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// inlining.
isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
DiscardBaselineCode(*shared);
}
@@ -1399,26 +1404,35 @@ void Debug::PrepareFunctionForDebugExecution(
DCHECK(shared->is_compiled());
DCHECK(shared->HasDebugInfo());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution)
+ if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution) {
return;
-
- if (shared->HasBytecodeArray()) {
- SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
}
+ // Have to discard baseline code before installing debug bytecode, since the
+ // bytecode array field on the baseline code object is immutable.
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
Deoptimizer::DeoptimizeAll(isolate_);
DiscardAllBaselineCode();
- InstallDebugBreakTrampoline();
} else {
DeoptimizeFunction(shared);
+ }
+
+ if (shared->HasBytecodeArray()) {
+ DCHECK(!shared->HasBaselineCode());
+ SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
+ }
+
+ if (debug_info->CanBreakAtEntry()) {
+ InstallDebugBreakTrampoline();
+ } else {
// Update PCs on the stack to point to recompiled code.
RedirectActiveFunctions redirect_visitor(
*shared, RedirectActiveFunctions::Mode::kUseDebugBytecode);
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
}
+
debug_info->set_flags(
debug_info->flags(kRelaxedLoad) | DebugInfo::kPreparedForDebugExecution,
kRelaxedStore);
@@ -1569,7 +1583,16 @@ class SharedFunctionInfoFinder {
}
if (start_position > target_position_) return;
- if (target_position_ > shared.EndPosition()) return;
+ if (target_position_ >= shared.EndPosition()) {
+ // The SharedFunctionInfo::EndPosition() is generally exclusive, but there
+ // are assumptions in various places in the debugger that for script level
+ // (toplevel function) there's an end position that is technically outside
+ // the script. It might be worth revisiting the overall design here at
+ // some point in the future.
+ if (!shared.is_toplevel() || target_position_ > shared.EndPosition()) {
+ return;
+ }
+ }
if (!current_candidate_.is_null()) {
if (current_start_position_ == start_position &&
@@ -2183,8 +2206,7 @@ bool Debug::ShouldBeSkipped() {
DisableBreak no_recursive_break(this);
StackTraceFrameIterator iterator(isolate_);
- CommonFrame* frame = iterator.frame();
- FrameSummary summary = FrameSummary::GetTop(frame);
+ FrameSummary summary = iterator.GetTopValidFrame();
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
@@ -2673,6 +2695,18 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
handle(bytecode_array, isolate_), offset);
Bytecode bytecode = bytecode_iterator.current_bytecode();
+ if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
+ auto id = (bytecode == Bytecode::kInvokeIntrinsic)
+ ? bytecode_iterator.GetIntrinsicIdOperand(0)
+ : bytecode_iterator.GetRuntimeIdOperand(0);
+ if (DebugEvaluate::IsSideEffectFreeIntrinsic(id)) {
+ return true;
+ }
+ side_effect_check_failed_ = true;
+ // Throw an uncatchable termination exception.
+ isolate_->TerminateExecution();
+ return false;
+ }
interpreter::Register reg;
switch (bytecode) {
case Bytecode::kStaCurrentContextSlot:
diff --git a/chromium/v8/src/debug/interface-types.h b/chromium/v8/src/debug/interface-types.h
index a2645d33d66..8c8d4bf2ad6 100644
--- a/chromium/v8/src/debug/interface-types.h
+++ b/chromium/v8/src/debug/interface-types.h
@@ -9,11 +9,14 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
namespace v8 {
+class String;
+
namespace internal {
class BuiltinArguments;
} // namespace internal
diff --git a/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index f3353b5689c..d0843f43e9e 100644
--- a/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -3,10 +3,23 @@
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
+// The deopt exit sizes below depend on the following IsolateData layout
+// guarantees:
+#define ASSERT_OFFSET(BuiltinName) \
+ STATIC_ASSERT(IsolateData::builtin_tier0_entry_table_offset() + \
+ Builtins::ToInt(BuiltinName) * kSystemPointerSize <= \
+ 0x1000)
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
+#undef ASSERT_OFFSET
+
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
diff --git a/chromium/v8/src/deoptimizer/deoptimized-frame-info.cc b/chromium/v8/src/deoptimizer/deoptimized-frame-info.cc
index a424a73ea1e..c268d7258fa 100644
--- a/chromium/v8/src/deoptimizer/deoptimized-frame-info.cc
+++ b/chromium/v8/src/deoptimizer/deoptimized-frame-info.cc
@@ -27,15 +27,17 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
TranslatedState::iterator frame_it,
Isolate* isolate) {
int parameter_count =
- frame_it->shared_info()->internal_formal_parameter_count();
+ frame_it->shared_info()
+ ->internal_formal_parameter_count_without_receiver();
TranslatedFrame::iterator stack_it = frame_it->begin();
// Get the function. Note that this might materialize the function.
// In case the debugger mutates this value, we should deoptimize
// the function and remember the value in the materialized value store.
- DCHECK_EQ(parameter_count, Handle<JSFunction>::cast(stack_it->GetValue())
- ->shared()
- .internal_formal_parameter_count());
+ DCHECK_EQ(parameter_count,
+ Handle<JSFunction>::cast(stack_it->GetValue())
+ ->shared()
+ .internal_formal_parameter_count_without_receiver());
stack_it++; // Skip the function.
stack_it++; // Skip the receiver.
diff --git a/chromium/v8/src/deoptimizer/deoptimizer.cc b/chromium/v8/src/deoptimizer/deoptimizer.cc
index ea460aa36fd..6bf26d5bf33 100644
--- a/chromium/v8/src/deoptimizer/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer/deoptimizer.cc
@@ -477,15 +477,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
}
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizeKind kind, unsigned deopt_exit_index,
Address from, int fp_to_sp_delta)
@@ -541,7 +532,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
unsigned size = ComputeInputFrameSize();
const int parameter_count =
- InternalFormalParameterCountWithReceiver(function.shared());
+ function.shared().internal_formal_parameter_count_with_receiver();
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
@@ -903,9 +894,10 @@ void Deoptimizer::DoComputeOutputFrames() {
isolate_, input_->GetFramePointerAddress(), stack_fp_, &state_iterator,
input_data.LiteralArray(), input_->GetRegisterValues(), trace_file,
function_.IsHeapObject()
- ? function_.shared().internal_formal_parameter_count()
+ ? function_.shared()
+ .internal_formal_parameter_count_without_receiver()
: 0,
- actual_argument_count_);
+ actual_argument_count_ - kJSArgcReceiverSlots);
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
@@ -1026,7 +1018,8 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const int bytecode_offset =
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
- const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+ const int parameters_count =
+ shared.internal_formal_parameter_count_with_receiver();
// If this is the bottom most frame or the previous frame was the arguments
// adaptor fake frame, then we already have extra arguments in the stack
@@ -1068,7 +1061,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool advance_bc =
(!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
!goto_catch_handler;
- const bool is_baseline = shared.HasBaselineData();
+ const bool is_baseline = shared.HasBaselineCode();
Code dispatch_builtin =
builtins->code(DispatchBuiltinFor(is_baseline, advance_bc));
@@ -1100,11 +1093,13 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
}
// Note: parameters_count includes the receiver.
+ // TODO(v8:11112): Simplify once the receiver is always included in argc.
if (verbose_tracing_enabled() && is_bottommost &&
- actual_argument_count_ > parameters_count - 1) {
- PrintF(trace_scope_->file(),
- " -- %d extra argument(s) already in the stack --\n",
- actual_argument_count_ - parameters_count + 1);
+ actual_argument_count_ - kJSArgcReceiverSlots > parameters_count - 1) {
+ PrintF(
+ trace_scope_->file(),
+ " -- %d extra argument(s) already in the stack --\n",
+ actual_argument_count_ - kJSArgcReceiverSlots - parameters_count + 1);
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
@@ -1185,7 +1180,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
(translated_state_.frames()[frame_index - 1]).kind();
argc = previous_frame_kind == TranslatedFrame::kArgumentsAdaptor
? output_[frame_index - 1]->parameter_count()
- : parameters_count - 1;
+ : parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
}
frame_writer.PushRawValue(argc, "actual argument count\n");
@@ -1334,7 +1329,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
TranslatedFrame::iterator value_iterator = translated_frame->begin();
const int argument_count_without_receiver = translated_frame->height() - 1;
const int formal_parameter_count =
- translated_frame->raw_shared_info().internal_formal_parameter_count();
+ translated_frame->raw_shared_info()
+ .internal_formal_parameter_count_without_receiver();
const int extra_argument_count =
argument_count_without_receiver - formal_parameter_count;
// The number of pushed arguments is the maximum of the actual argument count
@@ -1350,8 +1346,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
// Allocate and store the output frame description.
- FrameDescription* output_frame = new (output_frame_size)
- FrameDescription(output_frame_size, argument_count_without_receiver);
+ FrameDescription* output_frame = new (output_frame_size) FrameDescription(
+ output_frame_size, JSParameterCount(argument_count_without_receiver));
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
const intptr_t top_address =
@@ -1470,9 +1466,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushTranslatedValue(value_iterator++, "context");
// Number of incoming arguments.
- const uint32_t parameters_count_without_receiver = parameters_count - 1;
- frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
- "argc\n");
+ const uint32_t argc = parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
+ frame_writer.PushRawObject(Smi::FromInt(argc), "argc\n");
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
@@ -2067,7 +2062,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
- int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+ int parameter_slots = shared.internal_formal_parameter_count_with_receiver();
return parameter_slots * kSystemPointerSize;
}
diff --git a/chromium/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/chromium/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
new file mode 100644
index 00000000000..fb82466af1e
--- /dev/null
+++ b/chromium/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/deoptimizer.h"
+
+namespace v8 {
+namespace internal {
+
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeDeoptExitSize =
+ kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
+// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
+const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
+const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
+ kInstrSize + kSystemPointerSize;
+
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+ return Float32::FromBits(
+ static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+}
+
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index cde68050628..d7cd04bdf74 100644
--- a/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -2,15 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
+// The deopt exit sizes below depend on the following IsolateData layout
+// guarantees:
+#define ASSERT_OFFSET(BuiltinName) \
+ STATIC_ASSERT(IsolateData::builtin_tier0_entry_table_offset() + \
+ Builtins::ToInt(BuiltinName) * kSystemPointerSize <= \
+ 0x1000)
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
+#undef ASSERT_OFFSET
+
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
diff --git a/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc b/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
index b7dceed503d..12573ed29be 100644
--- a/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
+++ b/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
@@ -8,9 +8,9 @@ namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 4 * kInstrSize;
-const int Deoptimizer::kLazyDeoptExitSize = 4 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 5 * kInstrSize;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 4 * kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
diff --git a/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 54e450f3e88..c776bdb48b2 100644
--- a/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -3,10 +3,23 @@
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
+// The deopt exit sizes below depend on the following IsolateData layout
+// guarantees:
+#define ASSERT_OFFSET(BuiltinName) \
+ STATIC_ASSERT(IsolateData::builtin_tier0_entry_table_offset() + \
+ Builtins::ToInt(BuiltinName) * kSystemPointerSize <= \
+ 0x1000)
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
+#undef ASSERT_OFFSET
+
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
diff --git a/chromium/v8/src/deoptimizer/translated-state.cc b/chromium/v8/src/deoptimizer/translated-state.cc
index 4f5e3370e65..721918c1955 100644
--- a/chromium/v8/src/deoptimizer/translated-state.cc
+++ b/chromium/v8/src/deoptimizer/translated-state.cc
@@ -678,15 +678,6 @@ TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
return frame;
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
int TranslatedFrame::GetValueCount() {
// The function is added to all frame state descriptors in
// InstructionSelector::AddInputsToFrameStateDescriptor.
@@ -695,7 +686,7 @@ int TranslatedFrame::GetValueCount() {
switch (kind()) {
case kUnoptimizedFunction: {
int parameter_count =
- InternalFormalParameterCountWithReceiver(raw_shared_info_);
+ raw_shared_info_.internal_formal_parameter_count_with_receiver();
static constexpr int kTheContext = 1;
static constexpr int kTheAccumulator = 1;
return height() + parameter_count + kTheContext + kTheFunction +
@@ -748,7 +739,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
+ int arg_count =
+ shared_info.internal_formal_parameter_count_with_receiver();
PrintF(trace_file,
" => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
"inputs:\n",
@@ -1298,7 +1290,9 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
int actual_argc = frame->GetActualArgumentCount();
Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
- frame->function().shared().internal_formal_parameter_count(),
+ frame->function()
+ .shared()
+ .internal_formal_parameter_count_without_receiver(),
actual_argc);
}
@@ -1977,21 +1971,21 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
// be shown in a stack trace.
if (frames_[i].kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuation &&
- frames_[i].shared_info()->internal_formal_parameter_count() ==
- kDontAdaptArgumentsSentinel) {
+ frames_[i].shared_info()->IsDontAdaptArguments()) {
DCHECK(frames_[i].shared_info()->IsApiFunction());
// The argument count for this special case is always the second
// to last value in the TranslatedFrame. It should also always be
- // {1}, as the GenericLazyDeoptContinuation builtin only has one
- // argument (the receiver).
+ // {1}, as the GenericLazyDeoptContinuation builtin has one explicit
+ // argument (the result).
static constexpr int kTheContext = 1;
const int height = frames_[i].height() + kTheContext;
*args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
- DCHECK_EQ(*args_count, 1);
+ DCHECK_EQ(*args_count, JSParameterCount(1));
} else {
- *args_count = InternalFormalParameterCountWithReceiver(
- *frames_[i].shared_info());
+ *args_count = frames_[i]
+ .shared_info()
+ ->internal_formal_parameter_count_with_receiver();
}
return &(frames_[i]);
}
diff --git a/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 8a95afdc35c..484ede213ad 100644
--- a/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -5,14 +5,27 @@
#if V8_TARGET_ARCH_X64
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
+// The deopt exit sizes below depend on the following IsolateData layout
+// guarantees:
+#define ASSERT_OFFSET(BuiltinName) \
+ STATIC_ASSERT(IsolateData::builtin_tier0_entry_table_offset() + \
+ Builtins::ToInt(BuiltinName) * kSystemPointerSize <= \
+ 0x7F)
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
+ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Bailout);
+#undef ASSERT_OFFSET
+
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 7;
-const int Deoptimizer::kLazyDeoptExitSize = 7;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 12;
+const int Deoptimizer::kNonLazyDeoptExitSize = 4;
+const int Deoptimizer::kLazyDeoptExitSize = 4;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 9;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 5;
diff --git a/chromium/v8/src/diagnostics/arm/disasm-arm.cc b/chromium/v8/src/diagnostics/arm/disasm-arm.cc
index cf37d12a1f9..01b697b4bb4 100644
--- a/chromium/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/chromium/v8/src/diagnostics/arm/disasm-arm.cc
@@ -676,7 +676,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
default: {
UNREACHABLE();
- return -1;
}
}
out_buffer_pos_ +=
@@ -787,7 +786,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// strex
@@ -808,7 +806,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -853,7 +850,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -894,7 +890,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
return;
@@ -1030,7 +1025,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The Opcode field is a 4-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -1107,10 +1101,8 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
case 1:
UNREACHABLE();
- break;
case 2:
UNREACHABLE();
- break;
case 3:
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
break;
@@ -1119,7 +1111,6 @@ void Decoder::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNREACHABLE();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -1948,7 +1939,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else if (instr->Opc1Value() == 0x4 && op2) {
// Floating-point minNum/maxNum.
@@ -2002,7 +1992,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else {
Unknown(instr);
@@ -2037,13 +2026,11 @@ void Decoder::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
int op1 = instr->Bit(4);
if (op0 == 0) {
// Advanced SIMD three registers of same length.
- int Vd, Vm, Vn;
+ int Vm, Vn;
if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
Vm = instr->VFPMRegValue(kDoublePrecision);
Vn = instr->VFPNRegValue(kDoublePrecision);
} else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
@@ -2617,12 +2604,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/chromium/v8/src/diagnostics/arm/eh-frame-arm.cc b/chromium/v8/src/diagnostics/arm/eh-frame-arm.cc
index 7d0dc49155a..ef0a421820b 100644
--- a/chromium/v8/src/diagnostics/arm/eh-frame-arm.cc
+++ b/chromium/v8/src/diagnostics/arm/eh-frame-arm.cc
@@ -37,7 +37,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -54,7 +53,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "lr";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/chromium/v8/src/diagnostics/arm/unwinder-arm.cc b/chromium/v8/src/diagnostics/arm/unwinder-arm.cc
index e0e2f0e91f4..e51804caea1 100644
--- a/chromium/v8/src/diagnostics/arm/unwinder-arm.cc
+++ b/chromium/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -5,7 +5,7 @@
#include <memory>
#include "include/v8-unwinder-state.h"
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/diagnostics/unwinder.h"
#include "src/execution/frame-constants.h"
diff --git a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
index 93b9531bd5d..af6e7f5441e 100644
--- a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -3954,7 +3954,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -3997,7 +3996,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return 0;
}
UNIMPLEMENTED();
- return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -4042,12 +4040,10 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
UNIMPLEMENTED();
- return 0;
}
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -4342,12 +4338,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM64 does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM64 does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/chromium/v8/src/diagnostics/arm64/eh-frame-arm64.cc b/chromium/v8/src/diagnostics/arm64/eh-frame-arm64.cc
index 115d0cc300c..d27827cfc12 100644
--- a/chromium/v8/src/diagnostics/arm64/eh-frame-arm64.cc
+++ b/chromium/v8/src/diagnostics/arm64/eh-frame-arm64.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kX0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/chromium/v8/src/diagnostics/compilation-statistics.cc b/chromium/v8/src/diagnostics/compilation-statistics.cc
index 40bb239b125..74fa232a080 100644
--- a/chromium/v8/src/diagnostics/compilation-statistics.cc
+++ b/chromium/v8/src/diagnostics/compilation-statistics.cc
@@ -56,6 +56,29 @@ void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
}
}
+std::string CompilationStatistics::BasicStats::AsJSON() {
+// clang-format off
+#define DICT(s) "{" << s << "}"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
+
+ DCHECK_EQ(function_name_.find("\""), std::string::npos);
+
+ std::stringstream stream;
+ stream << DICT(
+ MEMBER("function_name") << QUOTE(function_name_) << ","
+ MEMBER("total_allocated_bytes") << total_allocated_bytes_ << ","
+ MEMBER("max_allocated_bytes") << max_allocated_bytes_ << ","
+ MEMBER("absolute_max_allocated_bytes") << absolute_max_allocated_bytes_);
+
+ return stream.str();
+
+#undef DICT
+#undef QUOTE
+#undef MEMBER
+ // clang-format on
+}
+
static void WriteLine(std::ostream& os, bool machine_format, const char* name,
const CompilationStatistics::BasicStats& stats,
const CompilationStatistics::BasicStats& total_stats) {
diff --git a/chromium/v8/src/diagnostics/compilation-statistics.h b/chromium/v8/src/diagnostics/compilation-statistics.h
index d14e108d078..a6abdf5e89b 100644
--- a/chromium/v8/src/diagnostics/compilation-statistics.h
+++ b/chromium/v8/src/diagnostics/compilation-statistics.h
@@ -37,6 +37,8 @@ class CompilationStatistics final : public Malloced {
void Accumulate(const BasicStats& stats);
+ std::string AsJSON();
+
base::TimeDelta delta_;
size_t total_allocated_bytes_;
size_t max_allocated_bytes_;
diff --git a/chromium/v8/src/diagnostics/disassembler.cc b/chromium/v8/src/diagnostics/disassembler.cc
index 596362b351a..928fe1f3576 100644
--- a/chromium/v8/src/diagnostics/disassembler.cc
+++ b/chromium/v8/src/diagnostics/disassembler.cc
@@ -128,8 +128,11 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
const unsigned kRootsTableSize = sizeof(RootsTable);
const int kExtRefsTableStart = IsolateData::external_reference_table_offset();
const unsigned kExtRefsTableSize = ExternalReferenceTable::kSizeInBytes;
- const int kBuiltinsTableStart = IsolateData::builtins_table_offset();
- const unsigned kBuiltinsTableSize =
+ const int kBuiltinTier0TableStart = IsolateData::builtin_tier0_table_offset();
+ const unsigned kBuiltinTier0TableSize =
+ Builtins::kBuiltinTier0Count * kSystemPointerSize;
+ const int kBuiltinTableStart = IsolateData::builtin_table_offset();
+ const unsigned kBuiltinTableSize =
Builtins::kBuiltinCount * kSystemPointerSize;
if (static_cast<unsigned>(offset - kRootsTableStart) < kRootsTableSize) {
@@ -143,7 +146,6 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
SNPrintF(v8_buffer_, "root (%s)", RootsTable::name(root_index));
return v8_buffer_.begin();
-
} else if (static_cast<unsigned>(offset - kExtRefsTableStart) <
kExtRefsTableSize) {
uint32_t offset_in_extref_table = offset - kExtRefsTableStart;
@@ -162,17 +164,24 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
isolate_->external_reference_table()->NameFromOffset(
offset_in_extref_table));
return v8_buffer_.begin();
-
- } else if (static_cast<unsigned>(offset - kBuiltinsTableStart) <
- kBuiltinsTableSize) {
- uint32_t offset_in_builtins_table = (offset - kBuiltinsTableStart);
+ } else if (static_cast<unsigned>(offset - kBuiltinTier0TableStart) <
+ kBuiltinTier0TableSize) {
+ uint32_t offset_in_builtins_table = (offset - kBuiltinTier0TableStart);
Builtin builtin =
Builtins::FromInt(offset_in_builtins_table / kSystemPointerSize);
const char* name = Builtins::name(builtin);
SNPrintF(v8_buffer_, "builtin (%s)", name);
return v8_buffer_.begin();
+ } else if (static_cast<unsigned>(offset - kBuiltinTableStart) <
+ kBuiltinTableSize) {
+ uint32_t offset_in_builtins_table = (offset - kBuiltinTableStart);
+ Builtin builtin =
+ Builtins::FromInt(offset_in_builtins_table / kSystemPointerSize);
+ const char* name = Builtins::name(builtin);
+ SNPrintF(v8_buffer_, "builtin (%s)", name);
+ return v8_buffer_.begin();
} else {
// It must be a direct access to one of the external values.
if (directly_accessed_external_refs_.empty()) {
diff --git a/chromium/v8/src/diagnostics/eh-frame.cc b/chromium/v8/src/diagnostics/eh-frame.cc
index d53ea7698a0..223e288e6e9 100644
--- a/chromium/v8/src/diagnostics/eh-frame.cc
+++ b/chromium/v8/src/diagnostics/eh-frame.cc
@@ -27,14 +27,12 @@ void EhFrameWriter::WriteInitialStateInCie() { UNIMPLEMENTED(); }
int EhFrameWriter::RegisterToDwarfCode(Register) {
UNIMPLEMENTED();
- return -1;
}
#ifdef ENABLE_DISASSEMBLER
const char* EhFrameDisassembler::DwarfRegisterCodeToString(int) {
UNIMPLEMENTED();
- return nullptr;
}
#endif
diff --git a/chromium/v8/src/diagnostics/gdb-jit.cc b/chromium/v8/src/diagnostics/gdb-jit.cc
index 53c29cfb242..bc03a189cd5 100644
--- a/chromium/v8/src/diagnostics/gdb-jit.cc
+++ b/chromium/v8/src/diagnostics/gdb-jit.cc
@@ -4,14 +4,17 @@
#include "src/diagnostics/gdb-jit.h"
+#include <iterator>
#include <map>
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/api/api-inl.h"
+#include "src/base/address-region.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
+#include "src/base/memory.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/strings.h"
@@ -63,7 +66,9 @@ class Writer {
T* operator->() { return w_->RawSlotAt<T>(offset_); }
- void set(const T& value) { *w_->RawSlotAt<T>(offset_) = value; }
+ void set(const T& value) {
+ base::WriteUnalignedValue(w_->AddressAt<T>(offset_), value);
+ }
Slot<T> at(int i) { return Slot<T>(w_, offset_ + sizeof(T) * i); }
@@ -75,7 +80,7 @@ class Writer {
template <typename T>
void Write(const T& val) {
Ensure(position_ + sizeof(T));
- *RawSlotAt<T>(position_) = val;
+ base::WriteUnalignedValue(AddressAt<T>(position_), val);
position_ += sizeof(T);
}
@@ -154,6 +159,12 @@ class Writer {
friend class Slot;
template <typename T>
+ Address AddressAt(uintptr_t offset) {
+ DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
+ return reinterpret_cast<Address>(&buffer_[offset]);
+ }
+
+ template <typename T>
T* RawSlotAt(uintptr_t offset) {
DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
return reinterpret_cast<T*>(&buffer_[offset]);
@@ -896,17 +907,20 @@ class CodeDescription {
};
#endif
- CodeDescription(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo)
- : name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
+ CodeDescription(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ bool is_function)
+ : name_(name),
+ shared_info_(shared),
+ lineinfo_(lineinfo),
+ is_function_(is_function),
+ code_region_(region) {}
const char* name() const { return name_; }
LineInfo* lineinfo() const { return lineinfo_; }
- bool is_function() const {
- return CodeKindIsOptimizedJSFunction(code_.kind());
- }
+ bool is_function() const { return is_function_; }
bool has_scope_info() const { return !shared_info_.is_null(); }
@@ -915,15 +929,11 @@ class CodeDescription {
return shared_info_.scope_info();
}
- uintptr_t CodeStart() const {
- return static_cast<uintptr_t>(code_.InstructionStart());
- }
+ uintptr_t CodeStart() const { return code_region_.begin(); }
- uintptr_t CodeEnd() const {
- return static_cast<uintptr_t>(code_.InstructionEnd());
- }
+ uintptr_t CodeEnd() const { return code_region_.end(); }
- uintptr_t CodeSize() const { return CodeEnd() - CodeStart(); }
+ uintptr_t CodeSize() const { return code_region_.size(); }
bool has_script() {
return !shared_info_.is_null() && shared_info_.script().IsScript();
@@ -933,6 +943,8 @@ class CodeDescription {
bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
+ base::AddressRegion region() { return code_region_; }
+
#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
DCHECK(state < STACK_STATE_MAX);
@@ -946,7 +958,7 @@ class CodeDescription {
#endif
std::unique_ptr<char[]> GetFilename() {
- if (!shared_info_.is_null()) {
+ if (!shared_info_.is_null() && script().name().IsString()) {
return String::cast(script().name()).ToCString();
} else {
std::unique_ptr<char[]> result(new char[1]);
@@ -965,9 +977,10 @@ class CodeDescription {
private:
const char* name_;
- Code code_;
SharedFunctionInfo shared_info_;
LineInfo* lineinfo_;
+ bool is_function_;
+ base::AddressRegion code_region_;
#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
@@ -1080,6 +1093,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_LOONG64
+ UNIMPLEMENTED();
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
#elif V8_TARGET_ARCH_S390
@@ -1092,7 +1107,7 @@ class DebugInfoSection : public DebugSection {
int params = scope.ParameterCount();
int context_slots = scope.ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
- int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int internal_slots = scope.ContextHeaderLength();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
@@ -1109,7 +1124,7 @@ class DebugInfoSection : public DebugSection {
}
// See contexts.h for more information.
- DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, 3);
+ DCHECK(internal_slots == 2 || internal_slots == 3);
DCHECK_EQ(Context::SCOPE_INFO_INDEX, 0);
DCHECK_EQ(Context::PREVIOUS_INDEX, 1);
DCHECK_EQ(Context::EXTENSION_INDEX, 2);
@@ -1117,8 +1132,10 @@ class DebugInfoSection : public DebugSection {
w->WriteString(".scope_info");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".previous");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".extension");
+ if (internal_slots == 3) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".extension");
+ }
for (int context_slot = 0; context_slot < context_slots; ++context_slot) {
w->WriteULEB128(current_abbreviation++);
@@ -1814,26 +1831,17 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
return CreateCodeEntry(reinterpret_cast<Address>(w.buffer()), w.position());
}
-struct AddressRange {
- Address start;
- Address end;
-};
-
-struct AddressRangeLess {
- bool operator()(const AddressRange& a, const AddressRange& b) const {
- if (a.start == b.start) return a.end < b.end;
- return a.start < b.start;
+// Like base::AddressRegion::StartAddressLess but also compares |end| when
+// |begin| is equal.
+struct AddressRegionLess {
+ bool operator()(const base::AddressRegion& a,
+ const base::AddressRegion& b) const {
+ if (a.begin() == b.begin()) return a.end() < b.end();
+ return a.begin() < b.begin();
}
};
-struct CodeMapConfig {
- using Key = AddressRange;
- using Value = JITCodeEntry*;
- using Less = AddressRangeLess;
-};
-
-using CodeMap =
- std::map<CodeMapConfig::Key, CodeMapConfig::Value, CodeMapConfig::Less>;
+using CodeMap = std::map<base::AddressRegion, JITCodeEntry*, AddressRegionLess>;
static CodeMap* GetCodeMap() {
// TODO(jgruber): Don't leak.
@@ -1907,50 +1915,72 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-// Remove entries from the map that intersect the given address range,
-// and deregister them from GDB.
-static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
- DCHECK(range.start < range.end);
+static base::Optional<std::pair<CodeMap::iterator, CodeMap::iterator>>
+GetOverlappingRegions(CodeMap* map, const base::AddressRegion region) {
+ DCHECK_LT(region.begin(), region.end());
- if (map->empty()) return;
+ if (map->empty()) return {};
// Find the first overlapping entry.
- // If successful, points to the first element not less than `range`. The
+ // If successful, points to the first element not less than `region`. The
// returned iterator has the key in `first` and the value in `second`.
- auto it = map->lower_bound(range);
+ auto it = map->lower_bound(region);
auto start_it = it;
if (it == map->end()) {
start_it = map->begin();
+ // Find the first overlapping entry.
+ for (; start_it != map->end(); ++start_it) {
+ if (start_it->first.end() > region.begin()) {
+ break;
+ }
+ }
} else if (it != map->begin()) {
for (--it; it != map->begin(); --it) {
- if ((*it).first.end <= range.start) break;
+ if ((*it).first.end() <= region.begin()) break;
+ start_it = it;
+ }
+ if (it == map->begin() && it->first.end() > region.begin()) {
start_it = it;
}
}
- DCHECK(start_it != map->end());
+ if (start_it == map->end()) {
+ return {};
+ }
- // Find the first non-overlapping entry after `range`.
+ // Find the first non-overlapping entry after `region`.
- const auto end_it = map->lower_bound({range.end, 0});
+ const auto end_it = map->lower_bound({region.end(), 0});
- // Evict intersecting ranges.
+ // Return a range containing intersecting regions.
- if (std::distance(start_it, end_it) < 1) return; // No overlapping entries.
+ if (std::distance(start_it, end_it) < 1)
+ return {}; // No overlapping entries.
- for (auto it = start_it; it != end_it; it++) {
- JITCodeEntry* old_entry = (*it).second;
- UnregisterCodeEntry(old_entry);
- DestroyCodeEntry(old_entry);
- }
+ return {{start_it, end_it}};
+}
+
+// Remove entries from the map that intersect the given address region,
+// and deregister them from GDB.
+static void RemoveJITCodeEntries(CodeMap* map,
+ const base::AddressRegion region) {
+ if (auto overlap = GetOverlappingRegions(map, region)) {
+ auto start_it = overlap->first;
+ auto end_it = overlap->second;
+ for (auto it = start_it; it != end_it; it++) {
+ JITCodeEntry* old_entry = (*it).second;
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+ }
- map->erase(start_it, end_it);
+ map->erase(start_it, end_it);
+ }
}
// Insert the entry into the map and register it with GDB.
-static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
+static void AddJITCodeEntry(CodeMap* map, const base::AddressRegion region,
JITCodeEntry* entry, bool dump_if_enabled,
const char* name_hint) {
#if defined(DEBUG) && !V8_OS_WIN
@@ -1967,24 +1997,21 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
}
#endif
- auto result = map->emplace(range, entry);
+ auto result = map->emplace(region, entry);
DCHECK(result.second); // Insertion happened.
USE(result);
RegisterCodeEntry(entry);
}
-static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo) {
+static void AddCode(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ Isolate* isolate, bool is_function) {
DisallowGarbageCollection no_gc;
+ CodeDescription code_desc(name, region, shared, lineinfo, is_function);
CodeMap* code_map = GetCodeMap();
- AddressRange range;
- range.start = code.address();
- range.end = code.address() + code.CodeSize();
- RemoveJITCodeEntries(code_map, range);
-
- CodeDescription code_desc(name, code, shared, lineinfo);
+ RemoveJITCodeEntries(code_map, region);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@@ -1992,7 +2019,6 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
}
AddUnwindInfo(&code_desc);
- Isolate* isolate = code.GetIsolate();
JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
delete lineinfo;
@@ -2008,25 +2034,40 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
should_dump = (name_hint != nullptr);
}
}
- AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
+ AddJITCodeEntry(code_map, region, entry, should_dump, name_hint);
}
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
- if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
+ if ((event->code_type != v8::JitCodeEvent::JIT_CODE) &&
+ (event->code_type != v8::JitCodeEvent::WASM_CODE)) {
+ return;
+ }
base::MutexGuard lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
Address addr = reinterpret_cast<Address>(event->code_start);
- Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
- Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
LineInfo* lineinfo = GetLineInfo(addr);
std::string event_name(event->name.str, event->name.len);
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
SharedFunctionInfo shared = event->script.IsEmpty()
? SharedFunctionInfo()
: *Utils::OpenHandle(*event->script);
- AddCode(event_name.c_str(), code, shared, lineinfo);
+ Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
+ bool is_function = false;
+ // TODO(zhin): See if we can use event->code_type to determine
+ // is_function, the difference currently is that JIT_CODE is SparkPlug,
+ // TurboProp, TurboFan, whereas CodeKindIsOptimizedJSFunction is only
+ // TurboProp and TurboFan. is_function is used for AddUnwindInfo, and the
+ // prologue that SP generates probably matches that of TP/TF, so we can
+ // use event->code_type here instead of finding the Code.
+ // TODO(zhin): Rename is_function to be more accurate.
+ if (event->code_type == v8::JitCodeEvent::JIT_CODE) {
+ Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
+ is_function = CodeKindIsOptimizedJSFunction(code.kind());
+ }
+ AddCode(event_name.c_str(), {addr, event->code_len}, shared, lineinfo,
+ isolate, is_function);
break;
}
case v8::JitCodeEvent::CODE_MOVED:
@@ -2056,6 +2097,23 @@ void EventHandler(const v8::JitCodeEvent* event) {
}
}
}
+
+void AddRegionForTesting(const base::AddressRegion region) {
+ // For testing purposes we don't care about JITCodeEntry, pass nullptr.
+ auto result = GetCodeMap()->emplace(region, nullptr);
+ DCHECK(result.second); // Insertion happened.
+ USE(result);
+}
+
+void ClearCodeMapForTesting() { GetCodeMap()->clear(); }
+
+size_t NumOverlapEntriesForTesting(const base::AddressRegion region) {
+ if (auto overlaps = GetOverlappingRegions(GetCodeMap(), region)) {
+ return std::distance(overlaps->first, overlaps->second);
+ }
+ return 0;
+}
+
#endif
} // namespace GDBJITInterface
} // namespace internal
diff --git a/chromium/v8/src/diagnostics/gdb-jit.h b/chromium/v8/src/diagnostics/gdb-jit.h
index 82f5ce892c9..eb4d515a810 100644
--- a/chromium/v8/src/diagnostics/gdb-jit.h
+++ b/chromium/v8/src/diagnostics/gdb-jit.h
@@ -5,6 +5,8 @@
#ifndef V8_DIAGNOSTICS_GDB_JIT_H_
#define V8_DIAGNOSTICS_GDB_JIT_H_
+#include "src/base/address-region.h"
+
//
// GDB has two ways of interacting with JIT code. With the "JIT compilation
// interface", V8 can tell GDB when it emits JIT code. Unfortunately to do so,
@@ -29,9 +31,19 @@ struct JitCodeEvent;
namespace internal {
namespace GDBJITInterface {
#ifdef ENABLE_GDB_JIT_INTERFACE
+
// JitCodeEventHandler that creates ELF/Mach-O objects and registers them with
// GDB.
void EventHandler(const v8::JitCodeEvent* event);
+
+// Expose some functions for unittests. These only exercise the logic that adds
+// an AddressRegion to the CodeMap and checks for overlap; they do not touch
+// the actual JITCodeEntry at all.
+V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
+V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
+V8_EXPORT_PRIVATE size_t
+NumOverlapEntriesForTesting(const base::AddressRegion region);
+
#endif
} // namespace GDBJITInterface
} // namespace internal
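A minimal sketch of how a unittest might drive the ...ForTesting hooks declared above; the wrapper function, the guard placement, and the concrete addresses are illustrative assumptions, not part of this change.

#include "src/base/address-region.h"
#include "src/diagnostics/gdb-jit.h"

#ifdef ENABLE_GDB_JIT_INTERFACE
namespace v8 {
namespace internal {
namespace GDBJITInterface {

// Hypothetical helper: returns true when overlap counting behaves as expected.
bool OverlapSketchForTesting() {
  ClearCodeMapForTesting();
  // Track one code region covering [0x1000, 0x1100).
  AddRegionForTesting(base::AddressRegion(0x1000, 0x100));
  // A query overlapping the tail of that region should see one entry,
  // and a disjoint query should see none.
  return NumOverlapEntriesForTesting(base::AddressRegion(0x10f0, 0x20)) == 1 &&
         NumOverlapEntriesForTesting(base::AddressRegion(0x2000, 0x10)) == 0;
}

}  // namespace GDBJITInterface
}  // namespace internal
}  // namespace v8
#endif  // ENABLE_GDB_JIT_INTERFACE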
diff --git a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
index de124de7474..fbcba1a4b2e 100644
--- a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -89,6 +89,10 @@ static const char* const conditional_move_mnem[] = {
/*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
/*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
+
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
@@ -415,13 +419,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
UnimplementedInstruction();
return 1;
}
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
}
- break;
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
case 1: // fall through
- case 2:
+ case 2: {
if (rm == esp) {
byte sib = *(modrmp + 1);
int scale, index, base;
@@ -436,14 +438,13 @@ int DisassemblerIA32::PrintRightOperandHelper(
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
- disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
- return mod == 2 ? 5 : 2;
}
- break;
+ // No sib.
+ int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
+ return mod == 2 ? 5 : 2;
+ }
case 3:
AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
@@ -789,6 +790,15 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
#undef DECLARE_SSE_AVX_RM_DIS_CASE
+
+#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
+ case 0x##code: \
+ AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
+#undef DISASSEMBLE_AVX2_BROADCAST
+
default:
UnimplementedInstruction();
}
@@ -808,6 +818,20 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
+ case 0x0a:
+ AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
+ case 0x0b:
+ AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
case 0x0E:
AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -900,39 +924,8 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovddup %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ case 0x2c:
+ AppendToBuffer("vcvttsd2si %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x70:
@@ -946,6 +939,14 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+#define DISASM_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(DISASM_SSE2_INSTRUCTION_LIST_SD)
+#undef DISASM_SSE2_INSTRUCTION_LIST_SD
default:
UnimplementedInstruction();
}
@@ -967,6 +968,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovshdup %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2c:
+ AppendToBuffer("vcvttss2si %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x51:
AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -982,6 +987,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5B:
AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1167,6 +1177,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2e:
+ AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x50:
AppendToBuffer("vmovmskps %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
@@ -1243,12 +1257,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1274,6 +1286,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2e:
+ AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x50:
AppendToBuffer("vmovmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
@@ -1371,11 +1387,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1999,11 +2014,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintOperands("xadd", OPER_REG_OP_ORDER, data);
} else if (f0byte == 0xC2) {
data += 2;
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (f0byte == 0xC6) {
// shufps xmm, xmm/m128, imm8
@@ -2485,10 +2498,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("cmppd %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (*data == 0xC4) {
data++;
@@ -2658,30 +2670,15 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
case 0x2D:
mnem = "cvtsd2si";
break;
- case 0x51:
- mnem = "sqrtsd";
- break;
- case 0x58:
- mnem = "addsd";
- break;
- case 0x59:
- mnem = "mulsd";
- break;
- case 0x5C:
- mnem = "subsd";
- break;
- case 0x5D:
- mnem = "minsd";
- break;
- case 0x5E:
- mnem = "divsd";
- break;
- case 0x5F:
- mnem = "maxsd";
- break;
case 0x7C:
mnem = "haddps";
break;
+#define MNEM_FOR_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ mnem = "" #instruction; \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(MNEM_FOR_SSE2_INSTRUCTION_LIST_SD)
+#undef MNEM_FOR_SSE2_INSTRUCTION_LIST_SD
}
data += 3;
int mod, regop, rm;
@@ -2694,10 +2691,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd", "cmpltsd", "cmplesd", "cmpunordsd",
- "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%ssd %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
@@ -2835,10 +2829,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqss", "cmpltss", "cmpless", "cmpunordss",
- "cmpneqss", "cmpnltss", "cmpnless", "cmpordss"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%sss %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
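A rough sketch, not part of the change, of what the shared cmp_pseudo_op table above encodes: indexing it with the compare-predicate immediate yields the suffix that the disassembler splices into "cmp%ssd"/"cmp%sss", replacing the per-site string arrays that this diff deletes. The standalone helper and the masking below are illustrative.

#include <cstdio>
#include <string>

namespace {

// Copy of the table introduced at the top of disasm-ia32.cc in this diff.
const char* const cmp_pseudo_op[16] = {
    "eq",    "lt",  "le",  "unord", "neq",    "nlt", "nle", "ord",
    "eq_uq", "nge", "ngt", "false", "neq_oq", "ge",  "gt",  "true"};

// Mirrors AppendToBuffer("cmp%ssd %s,%s", cmp_pseudo_op[imm8], ...).
std::string CmpSdMnemonic(unsigned char imm8) {
  return std::string("cmp") + cmp_pseudo_op[imm8 & 0x0F] + "sd";
}

}  // namespace

int main() {
  std::printf("%s\n", CmpSdMnemonic(0x00).c_str());  // cmpeqsd
  std::printf("%s\n", CmpSdMnemonic(0x04).c_str());  // cmpneqsd
  std::printf("%s\n", CmpSdMnemonic(0x0D).c_str());  // cmpgesd (AVX-only predicate)
  return 0;
}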
diff --git a/chromium/v8/src/diagnostics/loong64/disasm-loong64.cc b/chromium/v8/src/diagnostics/loong64/disasm-loong64.cc
new file mode 100644
index 00000000000..9d8aee96a3a
--- /dev/null
+++ b/chromium/v8/src/diagnostics/loong64/disasm-loong64.cc
@@ -0,0 +1,1711 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/platform/platform.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::base::Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ Decoder(const Decoder&) = delete;
+ Decoder& operator=(const Decoder&) = delete;
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
+ void PrintRj(Instruction* instr);
+ void PrintRk(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintFj(Instruction* instr);
+ void PrintFk(Instruction* instr);
+ void PrintFd(Instruction* instr);
+ void PrintFa(Instruction* instr);
+ void PrintSa2(Instruction* instr);
+ void PrintSa3(Instruction* instr);
+ void PrintUi5(Instruction* instr);
+ void PrintUi6(Instruction* instr);
+ void PrintUi12(Instruction* instr);
+ void PrintMsbw(Instruction* instr);
+ void PrintLsbw(Instruction* instr);
+ void PrintMsbd(Instruction* instr);
+ void PrintLsbd(Instruction* instr);
+ // void PrintCond(Instruction* instr);
+ void PrintSi12(Instruction* instr);
+ void PrintSi14(Instruction* instr);
+ void PrintSi16(Instruction* instr);
+ void PrintSi20(Instruction* instr);
+ void PrintXi12(Instruction* instr);
+ void PrintXi20(Instruction* instr);
+ void PrintCj(Instruction* instr);
+ void PrintCd(Instruction* instr);
+ void PrintCa(Instruction* instr);
+ void PrintCode(Instruction* instr);
+ void PrintHint5(Instruction* instr);
+ void PrintHint15(Instruction* instr);
+ void PrintPCOffs16(Instruction* instr);
+ void PrintPCOffs21(Instruction* instr);
+ void PrintPCOffs26(Instruction* instr);
+ void PrintOffs16(Instruction* instr);
+ void PrintOffs21(Instruction* instr);
+ void PrintOffs26(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ int DecodeBreakInstr(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type.
+ int InstructionDecode(Instruction* instr);
+ void DecodeTypekOp6(Instruction* instr);
+ void DecodeTypekOp7(Instruction* instr);
+ void DecodeTypekOp8(Instruction* instr);
+ void DecodeTypekOp10(Instruction* instr);
+ void DecodeTypekOp12(Instruction* instr);
+ void DecodeTypekOp14(Instruction* instr);
+ int DecodeTypekOp17(Instruction* instr);
+ void DecodeTypekOp22(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::base::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+void Decoder::PrintRj(Instruction* instr) {
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRk(Instruction* instr) {
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+void Decoder::PrintFj(Instruction* instr) {
+ int freg = instr->FjValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFk(Instruction* instr) {
+ int freg = instr->FkValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFd(Instruction* instr) {
+ int freg = instr->FdValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFa(Instruction* instr) {
+ int freg = instr->FaValue();
+ PrintFPURegister(freg);
+}
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa2(Instruction* instr) {
+ int sa = instr->Sa2Value();
+ uint32_t opcode = (instr->InstructionBits() >> 18) << 18;
+ if (opcode == ALSL || opcode == ALSL_D) {
+ sa += 1;
+ }
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintSa3(Instruction* instr) {
+ int sa = instr->Sa3Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintUi5(Instruction* instr) {
+ int ui = instr->Ui5Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi6(Instruction* instr) {
+ int ui = instr->Ui6Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi12(Instruction* instr) {
+ int ui = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintXi12(Instruction* instr) {
+ int xi = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
+}
+
+void Decoder::PrintXi20(Instruction* instr) {
+ int xi = instr->Si20Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
+}
+
+void Decoder::PrintMsbd(Instruction* instr) {
+ int msbd = instr->MsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd);
+}
+
+void Decoder::PrintLsbd(Instruction* instr) {
+ int lsbd = instr->LsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd);
+}
+
+void Decoder::PrintMsbw(Instruction* instr) {
+ int msbw = instr->MsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw);
+}
+
+void Decoder::PrintLsbw(Instruction* instr) {
+ int lsbw = instr->LsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw);
+}
+
+void Decoder::PrintSi12(Instruction* instr) {
+ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si12Value());
+}
+
+void Decoder::PrintSi14(Instruction* instr) {
+ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits);
+ si <<= 2;
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si14Value() << 2);
+}
+
+void Decoder::PrintSi16(Instruction* instr) {
+ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si16Value());
+}
+
+void Decoder::PrintSi20(Instruction* instr) {
+ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si20Value());
+}
+
+void Decoder::PrintCj(Instruction* instr) {
+ int cj = instr->CjValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj);
+}
+
+void Decoder::PrintCd(Instruction* instr) {
+ int cd = instr->CdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd);
+}
+
+void Decoder::PrintCa(Instruction* instr) {
+ int ca = instr->CaValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca);
+}
+
+void Decoder::PrintCode(Instruction* instr) {
+ int code = instr->CodeValue();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code);
+}
+
+void Decoder::PrintHint5(Instruction* instr) {
+ int hint = instr->Hint5Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
+void Decoder::PrintHint15(Instruction* instr) {
+ int hint = instr->Hint15Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
+void Decoder::PrintPCOffs16(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs16Value();
+ int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >>
+ (32 - kOffsLowBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs21(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs21Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs21HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs26(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs26Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs26HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintOffs16(Instruction* instr) {
+ int offs = instr->Offs16Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
+}
+
+void Decoder::PrintOffs21(Instruction* instr) {
+ int offs = instr->Offs21Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
+}
+
+void Decoder::PrintOffs26(Instruction* instr) {
+ int offs = instr->Offs26Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'r');
+ if (format[1] == 'j') { // 'rj: Rj register.
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'rk: rk register.
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// Handle all FPUregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'f');
+ if (format[1] == 'j') { // 'fj: fj register.
+ int reg = instr->FjValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'fk: fk register.
+ int reg = instr->FkValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'a') { // 'fa: fa register.
+ int reg = instr->FaValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'c': {
+ switch (format[1]) {
+ case 'a':
+ DCHECK(STRING_STARTS_WITH(format, "ca"));
+ PrintCa(instr);
+ return 2;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "cd"));
+ PrintCd(instr);
+ return 2;
+ case 'j':
+ DCHECK(STRING_STARTS_WITH(format, "cj"));
+ PrintCj(instr);
+ return 2;
+ case 'o':
+ DCHECK(STRING_STARTS_WITH(format, "code"));
+ PrintCode(instr);
+ return 4;
+ }
+ }
+ case 'f': {
+ return FormatFPURegister(instr, format);
+ }
+ case 'h': {
+ if (format[4] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "hint5"));
+ PrintHint5(instr);
+ return 5;
+ } else if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "hint15"));
+ PrintHint15(instr);
+ return 6;
+ }
+ break;
+ }
+ case 'l': {
+ switch (format[3]) {
+ case 'w':
+ DCHECK(STRING_STARTS_WITH(format, "lsbw"));
+ PrintLsbw(instr);
+ return 4;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "lsbd"));
+ PrintLsbd(instr);
+ return 4;
+ default:
+ return 0;
+ }
+ }
+ case 'm': {
+ if (format[3] == 'w') {
+ DCHECK(STRING_STARTS_WITH(format, "msbw"));
+ PrintMsbw(instr);
+ } else if (format[3] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "msbd"));
+ PrintMsbd(instr);
+ }
+ return 4;
+ }
+ case 'o': {
+ if (format[1] == 'f') {
+ if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs16"));
+ PrintOffs16(instr);
+ return 6;
+ } else if (format[4] == '2') {
+ if (format[5] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs21"));
+ PrintOffs21(instr);
+ return 6;
+ } else if (format[5] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "offs26"));
+ PrintOffs26(instr);
+ return 6;
+ }
+ }
+ }
+ break;
+ }
+ case 'p': {
+ if (format[6] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs16"));
+ PrintPCOffs16(instr);
+ return 8;
+ } else if (format[6] == '2') {
+ if (format[7] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs21"));
+ PrintPCOffs21(instr);
+ return 8;
+ } else if (format[7] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs26"));
+ PrintPCOffs26(instr);
+ return 8;
+ }
+ }
+ break;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ switch (format[1]) {
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2"));
+ PrintSa2(instr);
+ } else if (format[2] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "sa3"));
+ PrintSa3(instr);
+ }
+ return 3;
+ case 'i':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "si20"));
+ PrintSi20(instr);
+ return 4;
+ } else if (format[2] == '1') {
+ switch (format[3]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "si12"));
+ PrintSi12(instr);
+ return 4;
+ case '4':
+ DCHECK(STRING_STARTS_WITH(format, "si14"));
+ PrintSi14(instr);
+ return 4;
+ case '6':
+ DCHECK(STRING_STARTS_WITH(format, "si16"));
+ PrintSi16(instr);
+ return 4;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case 'u': {
+ if (format[2] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "ui5"));
+ PrintUi5(instr);
+ return 3;
+ } else if (format[2] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "ui6"));
+ PrintUi6(instr);
+ return 3;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "ui12"));
+ PrintUi12(instr);
+ return 4;
+ }
+ break;
+ }
+ case 'x': {
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "xi20"));
+ PrintXi20(instr);
+ return 4;
+ } else if (format[3] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "xi12"));
+ PrintXi12(instr);
+ return 4;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which will just print "unknown" for the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+int Decoder::DecodeBreakInstr(Instruction* instr) {
+ // This is already known to be BREAK instr, just extract the code.
+ /*if (instr->Bits(14, 0) == static_cast<int>(kMaxStopCode)) {
+ // This is stop(msg).
+ Format(instr, "break, code: 'code");
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
+ static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
+ reinterpret_cast<uint64_t>(
+ *reinterpret_cast<char**>(instr + kInstrSize)));
+ // Size 3: the break_ instr, plus embedded 64-bit char pointer.
+ return 3 * kInstrSize;
+ } else {
+ Format(instr, "break, code: 'code");
+ return kInstrSize;
+ }*/
+ Format(instr, "break code: 'code");
+ return kInstrSize;
+} //===================================================
+
+void Decoder::DecodeTypekOp6(Instruction* instr) {
+ switch (instr->Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ Format(instr, "addu16i.d 'rd, 'rj, 'si16");
+ break;
+ case BEQZ:
+ Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BNEZ:
+ Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BCZ:
+ if (instr->Bit(8))
+ Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
+ else
+ Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
+ break;
+ case JIRL:
+ Format(instr, "jirl 'rd, 'rj, 'offs16");
+ break;
+ case B:
+ Format(instr, "b 'offs26 -> 'pcoffs26");
+ break;
+ case BL:
+ Format(instr, "bl 'offs26 -> 'pcoffs26");
+ break;
+ case BEQ:
+ Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BNE:
+ Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLT:
+ Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGE:
+ Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLTU:
+ Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGEU:
+ Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp7(Instruction* instr) {
+ switch (instr->Bits(31, 25) << 25) {
+ case LU12I_W:
+ Format(instr, "lu12i.w 'rd, 'xi20");
+ break;
+ case LU32I_D:
+ Format(instr, "lu32i.d 'rd, 'xi20");
+ break;
+ case PCADDI:
+ Format(instr, "pcaddi 'rd, 'xi20");
+ break;
+ case PCALAU12I:
+ Format(instr, "pcalau12i 'rd, 'xi20");
+ break;
+ case PCADDU12I:
+ Format(instr, "pcaddu12i 'rd, 'xi20");
+ break;
+ case PCADDU18I:
+ Format(instr, "pcaddu18i 'rd, 'xi20");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp8(Instruction* instr) {
+ switch (instr->Bits(31, 24) << 24) {
+ case LDPTR_W:
+ Format(instr, "ldptr.w 'rd, 'rj, 'si14");
+ break;
+ case STPTR_W:
+ Format(instr, "stptr.w 'rd, 'rj, 'si14");
+ break;
+ case LDPTR_D:
+ Format(instr, "ldptr.d 'rd, 'rj, 'si14");
+ break;
+ case STPTR_D:
+ Format(instr, "stptr.d 'rd, 'rj, 'si14");
+ break;
+ case LL_W:
+ Format(instr, "ll.w 'rd, 'rj, 'si14");
+ break;
+ case SC_W:
+ Format(instr, "sc.w 'rd, 'rj, 'si14");
+ break;
+ case LL_D:
+ Format(instr, "ll.d 'rd, 'rj, 'si14");
+ break;
+ case SC_D:
+ Format(instr, "sc.d 'rd, 'rj, 'si14");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp10(Instruction* instr) {
+ switch (instr->Bits(31, 22) << 22) {
+ case BSTR_W: {
+ if (instr->Bit(21) != 0) {
+ if (instr->Bit(15) == 0) {
+ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
+ } else {
+ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
+ }
+ }
+ break;
+ }
+ case BSTRINS_D:
+ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case BSTRPICK_D:
+ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case SLTI:
+ Format(instr, "slti 'rd, 'rj, 'si12");
+ break;
+ case SLTUI:
+ Format(instr, "sltui 'rd, 'rj, 'si12");
+ break;
+ case ADDI_W:
+ Format(instr, "addi.w 'rd, 'rj, 'si12");
+ break;
+ case ADDI_D:
+ Format(instr, "addi.d 'rd, 'rj, 'si12");
+ break;
+ case LU52I_D:
+ Format(instr, "lu52i.d 'rd, 'rj, 'xi12");
+ break;
+ case ANDI:
+ Format(instr, "andi 'rd, 'rj, 'xi12");
+ break;
+ case ORI:
+ Format(instr, "ori 'rd, 'rj, 'xi12");
+ break;
+ case XORI:
+ Format(instr, "xori 'rd, 'rj, 'xi12");
+ break;
+ case LD_B:
+ Format(instr, "ld.b 'rd, 'rj, 'si12");
+ break;
+ case LD_H:
+ Format(instr, "ld.h 'rd, 'rj, 'si12");
+ break;
+ case LD_W:
+ Format(instr, "ld.w 'rd, 'rj, 'si12");
+ break;
+ case LD_D:
+ Format(instr, "ld.d 'rd, 'rj, 'si12");
+ break;
+ case ST_B:
+ Format(instr, "st.b 'rd, 'rj, 'si12");
+ break;
+ case ST_H:
+ Format(instr, "st.h 'rd, 'rj, 'si12");
+ break;
+ case ST_W:
+ Format(instr, "st.w 'rd, 'rj, 'si12");
+ break;
+ case ST_D:
+ Format(instr, "st.d 'rd, 'rj, 'si12");
+ break;
+ case LD_BU:
+ Format(instr, "ld.bu 'rd, 'rj, 'si12");
+ break;
+ case LD_HU:
+ Format(instr, "ld.hu 'rd, 'rj, 'si12");
+ break;
+ case LD_WU:
+ Format(instr, "ld.wu 'rd, 'rj, 'si12");
+ break;
+ case FLD_S:
+ Format(instr, "fld.s 'fd, 'rj, 'si12");
+ break;
+ case FST_S:
+ Format(instr, "fst.s 'fd, 'rj, 'si12");
+ break;
+ case FLD_D:
+ Format(instr, "fld.d 'fd, 'rj, 'si12");
+ break;
+ case FST_D:
+ Format(instr, "fst.d 'fd, 'rj, 'si12");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp12(Instruction* instr) {
+ switch (instr->Bits(31, 20) << 20) {
+ case FMADD_S:
+ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMADD_D:
+ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_S:
+ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_D:
+ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_S:
+ Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_D:
+ Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_S:
+ Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_D:
+ Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FCMP_COND_S:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FCMP_COND_D:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FSEL:
+ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp14(Instruction* instr) {
+ switch (instr->Bits(31, 18) << 18) {
+ case ALSL:
+ if (instr->Bit(17))
+ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2");
+ else
+ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_W:
+ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_D:
+ Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3");
+ break;
+ case ALSL_D:
+ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case SLLI:
+ if (instr->Bit(16))
+ Format(instr, "slli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "slli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRLI:
+ if (instr->Bit(16))
+ Format(instr, "srli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRAI:
+ if (instr->Bit(16))
+ Format(instr, "srai.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srai.w 'rd, 'rj, 'ui5");
+ break;
+ case ROTRI:
+ if (instr->Bit(16))
+ Format(instr, "rotri.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "rotri.w 'rd, 'rj, 'ui5");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int Decoder::DecodeTypekOp17(Instruction* instr) {
+ switch (instr->Bits(31, 15) << 15) {
+ case ADD_W:
+ Format(instr, "add.w 'rd, 'rj, 'rk");
+ break;
+ case ADD_D:
+ Format(instr, "add.d 'rd, 'rj, 'rk");
+ break;
+ case SUB_W:
+ Format(instr, "sub.w 'rd, 'rj, 'rk");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'rd, 'rj, 'rk");
+ break;
+ case SLT:
+ Format(instr, "slt 'rd, 'rj, 'rk");
+ break;
+ case SLTU:
+ Format(instr, "sltu 'rd, 'rj, 'rk");
+ break;
+ case MASKEQZ:
+ Format(instr, "maskeqz 'rd, 'rj, 'rk");
+ break;
+ case MASKNEZ:
+ Format(instr, "masknez 'rd, 'rj, 'rk");
+ break;
+ case NOR:
+ Format(instr, "nor 'rd, 'rj, 'rk");
+ break;
+ case AND:
+ Format(instr, "and 'rd, 'rj, 'rk");
+ break;
+ case OR:
+ Format(instr, "or 'rd, 'rj, 'rk");
+ break;
+ case XOR:
+ Format(instr, "xor 'rd, 'rj, 'rk");
+ break;
+ case ORN:
+ Format(instr, "orn 'rd, 'rj, 'rk");
+ break;
+ case ANDN:
+ Format(instr, "andn 'rd, 'rj, 'rk");
+ break;
+ case SLL_W:
+ Format(instr, "sll.w 'rd, 'rj, 'rk");
+ break;
+ case SRL_W:
+ Format(instr, "srl.w 'rd, 'rj, 'rk");
+ break;
+ case SRA_W:
+ Format(instr, "sra.w 'rd, 'rj, 'rk");
+ break;
+ case SLL_D:
+ Format(instr, "sll.d 'rd, 'rj, 'rk");
+ break;
+ case SRL_D:
+ Format(instr, "srl.d 'rd, 'rj, 'rk");
+ break;
+ case SRA_D:
+ Format(instr, "sra.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_D:
+ Format(instr, "rotr.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_W:
+ Format(instr, "rotr.w 'rd, 'rj, 'rk");
+ break;
+ case MUL_W:
+ Format(instr, "mul.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_W:
+ Format(instr, "mulh.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_WU:
+ Format(instr, "mulh.wu 'rd, 'rj, 'rk");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_D:
+ Format(instr, "mulh.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_DU:
+ Format(instr, "mulh.du 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_W:
+ Format(instr, "mulw.d.w 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_WU:
+ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_W:
+ Format(instr, "div.w 'rd, 'rj, 'rk");
+ break;
+ case MOD_W:
+ Format(instr, "mod.w 'rd, 'rj, 'rk");
+ break;
+ case DIV_WU:
+ Format(instr, "div.wu 'rd, 'rj, 'rk");
+ break;
+ case MOD_WU:
+ Format(instr, "mod.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'rd, 'rj, 'rk");
+ break;
+ case MOD_D:
+ Format(instr, "mod.d 'rd, 'rj, 'rk");
+ break;
+ case DIV_DU:
+ Format(instr, "div.du 'rd, 'rj, 'rk");
+ break;
+ case MOD_DU:
+ Format(instr, "mod.du 'rd, 'rj, 'rk");
+ break;
+ case BREAK:
+ return DecodeBreakInstr(instr);
+ case FADD_S:
+ Format(instr, "fadd.s 'fd, 'fj, 'fk");
+ break;
+ case FADD_D:
+ Format(instr, "fadd.d 'fd, 'fj, 'fk");
+ break;
+ case FSUB_S:
+ Format(instr, "fsub.s 'fd, 'fj, 'fk");
+ break;
+ case FSUB_D:
+ Format(instr, "fsub.d 'fd, 'fj, 'fk");
+ break;
+ case FMUL_S:
+ Format(instr, "fmul.s 'fd, 'fj, 'fk");
+ break;
+ case FMUL_D:
+ Format(instr, "fmul.d 'fd, 'fj, 'fk");
+ break;
+ case FDIV_S:
+ Format(instr, "fdiv.s 'fd, 'fj, 'fk");
+ break;
+ case FDIV_D:
+ Format(instr, "fdiv.d 'fd, 'fj, 'fk");
+ break;
+ case FMAX_S:
+ Format(instr, "fmax.s 'fd, 'fj, 'fk");
+ break;
+ case FMAX_D:
+ Format(instr, "fmax.d 'fd, 'fj, 'fk");
+ break;
+ case FMIN_S:
+ Format(instr, "fmin.s 'fd, 'fj, 'fk");
+ break;
+ case FMIN_D:
+ Format(instr, "fmin.d 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_S:
+ Format(instr, "fmaxa.s 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_D:
+ Format(instr, "fmaxa.d 'fd, 'fj, 'fk");
+ break;
+ case FMINA_S:
+ Format(instr, "fmina.s 'fd, 'fj, 'fk");
+ break;
+ case FMINA_D:
+ Format(instr, "fmina.d 'fd, 'fj, 'fk");
+ break;
+ case LDX_B:
+ Format(instr, "ldx.b 'rd, 'rj, 'rk");
+ break;
+ case LDX_H:
+ Format(instr, "ldx.h 'rd, 'rj, 'rk");
+ break;
+ case LDX_W:
+ Format(instr, "ldx.w 'rd, 'rj, 'rk");
+ break;
+ case LDX_D:
+ Format(instr, "ldx.d 'rd, 'rj, 'rk");
+ break;
+ case STX_B:
+ Format(instr, "stx.b 'rd, 'rj, 'rk");
+ break;
+ case STX_H:
+ Format(instr, "stx.h 'rd, 'rj, 'rk");
+ break;
+ case STX_W:
+ Format(instr, "stx.w 'rd, 'rj, 'rk");
+ break;
+ case STX_D:
+ Format(instr, "stx.d 'rd, 'rj, 'rk");
+ break;
+ case LDX_BU:
+ Format(instr, "ldx.bu 'rd, 'rj, 'rk");
+ break;
+ case LDX_HU:
+ Format(instr, "ldx.hu 'rd, 'rj, 'rk");
+ break;
+ case LDX_WU:
+ Format(instr, "ldx.wu 'rd, 'rj, 'rk");
+ break;
+ case FLDX_S:
+ Format(instr, "fldx.s 'fd, 'rj, 'rk");
+ break;
+ case FLDX_D:
+ Format(instr, "fldx.d 'fd, 'rj, 'rk");
+ break;
+ case FSTX_S:
+ Format(instr, "fstx.s 'fd, 'rj, 'rk");
+ break;
+ case FSTX_D:
+ Format(instr, "fstx.d 'fd, 'rj, 'rk");
+ break;
+ case AMSWAP_W:
+ Format(instr, "amswap.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_D:
+ Format(instr, "amswap.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_W:
+ Format(instr, "amadd.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_D:
+ Format(instr, "amadd.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_W:
+ Format(instr, "amand.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_D:
+ Format(instr, "amand.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_W:
+ Format(instr, "amor.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_D:
+ Format(instr, "amor.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_W:
+ Format(instr, "amxor.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_D:
+ Format(instr, "amxor.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_W:
+ Format(instr, "ammax.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_D:
+ Format(instr, "ammax.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_W:
+ Format(instr, "ammin.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_D:
+ Format(instr, "ammin.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_WU:
+ Format(instr, "ammax.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DU:
+ Format(instr, "ammax.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_WU:
+ Format(instr, "ammin.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DU:
+ Format(instr, "ammin.du 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_W:
+ Format(instr, "amswap_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_D:
+ Format(instr, "amswap_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_W:
+ Format(instr, "amadd_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_D:
+ Format(instr, "amadd_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_W:
+ Format(instr, "amand_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_D:
+ Format(instr, "amand_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_W:
+ Format(instr, "amor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_D:
+ Format(instr, "amor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_W:
+ Format(instr, "amxor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_D:
+ Format(instr, "amxor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_W:
+ Format(instr, "ammax_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_D:
+ Format(instr, "ammax_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_W:
+ Format(instr, "ammin_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_D:
+ Format(instr, "ammin_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_WU:
+ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_DU:
+ Format(instr, "ammax_db.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_WU:
+ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_DU:
+ Format(instr, "ammin_db.du 'rd, 'rk, 'rj");
+ break;
+ case DBAR:
+ Format(instr, "dbar 'hint15");
+ break;
+ case IBAR:
+ Format(instr, "ibar 'hint15");
+ break;
+ case FSCALEB_S:
+ Format(instr, "fscaleb.s 'fd, 'fj, 'fk");
+ break;
+ case FSCALEB_D:
+ Format(instr, "fscaleb.d 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_S:
+ Format(instr, "fcopysign.s 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_D:
+ Format(instr, "fcopysign.d 'fd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return kInstrSize;
+}
+
+void Decoder::DecodeTypekOp22(Instruction* instr) {
+ switch (instr->Bits(31, 10) << 10) {
+ case CLZ_W:
+ Format(instr, "clz.w 'rd, 'rj");
+ break;
+ case CTZ_W:
+ Format(instr, "ctz.w 'rd, 'rj");
+ break;
+ case CLZ_D:
+ Format(instr, "clz.d 'rd, 'rj");
+ break;
+ case CTZ_D:
+ Format(instr, "ctz.d 'rd, 'rj");
+ break;
+ case REVB_2H:
+ Format(instr, "revb.2h 'rd, 'rj");
+ break;
+ case REVB_4H:
+ Format(instr, "revb.4h 'rd, 'rj");
+ break;
+ case REVB_2W:
+ Format(instr, "revb.2w 'rd, 'rj");
+ break;
+ case REVB_D:
+ Format(instr, "revb.d 'rd, 'rj");
+ break;
+ case REVH_2W:
+ Format(instr, "revh.2w 'rd, 'rj");
+ break;
+ case REVH_D:
+ Format(instr, "revh.d 'rd, 'rj");
+ break;
+ case BITREV_4B:
+ Format(instr, "bitrev.4b 'rd, 'rj");
+ break;
+ case BITREV_8B:
+ Format(instr, "bitrev.8b 'rd, 'rj");
+ break;
+ case BITREV_W:
+ Format(instr, "bitrev.w 'rd, 'rj");
+ break;
+ case BITREV_D:
+ Format(instr, "bitrev.d 'rd, 'rj");
+ break;
+ case EXT_W_B:
+ Format(instr, "ext.w.b 'rd, 'rj");
+ break;
+ case EXT_W_H:
+ Format(instr, "ext.w.h 'rd, 'rj");
+ break;
+ case FABS_S:
+ Format(instr, "fabs.s 'fd, 'fj");
+ break;
+ case FABS_D:
+ Format(instr, "fabs.d 'fd, 'fj");
+ break;
+ case FNEG_S:
+ Format(instr, "fneg.s 'fd, 'fj");
+ break;
+ case FNEG_D:
+ Format(instr, "fneg.d 'fd, 'fj");
+ break;
+ case FSQRT_S:
+ Format(instr, "fsqrt.s 'fd, 'fj");
+ break;
+ case FSQRT_D:
+ Format(instr, "fsqrt.d 'fd, 'fj");
+ break;
+ case FMOV_S:
+ Format(instr, "fmov.s 'fd, 'fj");
+ break;
+ case FMOV_D:
+ Format(instr, "fmov.d 'fd, 'fj");
+ break;
+ case MOVGR2FR_W:
+ Format(instr, "movgr2fr.w 'fd, 'rj");
+ break;
+ case MOVGR2FR_D:
+ Format(instr, "movgr2fr.d 'fd, 'rj");
+ break;
+ case MOVGR2FRH_W:
+ Format(instr, "movgr2frh.w 'fd, 'rj");
+ break;
+ case MOVFR2GR_S:
+ Format(instr, "movfr2gr.s 'rd, 'fj");
+ break;
+ case MOVFR2GR_D:
+ Format(instr, "movfr2gr.d 'rd, 'fj");
+ break;
+ case MOVFRH2GR_S:
+ Format(instr, "movfrh2gr.s 'rd, 'fj");
+ break;
+ case MOVGR2FCSR:
+ Format(instr, "movgr2fcsr fcsr, 'rj");
+ break;
+ case MOVFCSR2GR:
+ Format(instr, "movfcsr2gr 'rd, fcsr");
+ break;
+ case FCVT_S_D:
+ Format(instr, "fcvt.s.d 'fd, 'fj");
+ break;
+ case FCVT_D_S:
+ Format(instr, "fcvt.d.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_S:
+ Format(instr, "ftintrm.w.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_D:
+ Format(instr, "ftintrm.w.d 'fd, 'fj");
+ break;
+ case FTINTRM_L_S:
+ Format(instr, "ftintrm.l.s 'fd, 'fj");
+ break;
+ case FTINTRM_L_D:
+ Format(instr, "ftintrm.l.d 'fd, 'fj");
+ break;
+ case FTINTRP_W_S:
+ Format(instr, "ftintrp.w.s 'fd, 'fj");
+ break;
+ case FTINTRP_W_D:
+ Format(instr, "ftintrp.w.d 'fd, 'fj");
+ break;
+ case FTINTRP_L_S:
+ Format(instr, "ftintrp.l.s 'fd, 'fj");
+ break;
+ case FTINTRP_L_D:
+ Format(instr, "ftintrp.l.d 'fd, 'fj");
+ break;
+ case FTINTRZ_W_S:
+ Format(instr, "ftintrz.w.s 'fd, 'fj");
+ break;
+ case FTINTRZ_W_D:
+ Format(instr, "ftintrz.w.d 'fd, 'fj");
+ break;
+ case FTINTRZ_L_S:
+ Format(instr, "ftintrz.l.s 'fd, 'fj");
+ break;
+ case FTINTRZ_L_D:
+ Format(instr, "ftintrz.l.d 'fd, 'fj");
+ break;
+ case FTINTRNE_W_S:
+ Format(instr, "ftintrne.w.s 'fd, 'fj");
+ break;
+ case FTINTRNE_W_D:
+ Format(instr, "ftintrne.w.d 'fd, 'fj");
+ break;
+ case FTINTRNE_L_S:
+ Format(instr, "ftintrne.l.s 'fd, 'fj");
+ break;
+ case FTINTRNE_L_D:
+ Format(instr, "ftintrne.l.d 'fd, 'fj");
+ break;
+ case FTINT_W_S:
+ Format(instr, "ftint.w.s 'fd, 'fj");
+ break;
+ case FTINT_W_D:
+ Format(instr, "ftint.w.d 'fd, 'fj");
+ break;
+ case FTINT_L_S:
+ Format(instr, "ftint.l.s 'fd, 'fj");
+ break;
+ case FTINT_L_D:
+ Format(instr, "ftint.l.d 'fd, 'fj");
+ break;
+ case FFINT_S_W:
+ Format(instr, "ffint.s.w 'fd, 'fj");
+ break;
+ case FFINT_S_L:
+ Format(instr, "ffint.s.l 'fd, 'fj");
+ break;
+ case FFINT_D_W:
+ Format(instr, "ffint.d.w 'fd, 'fj");
+ break;
+ case FFINT_D_L:
+ Format(instr, "ffint.d.l 'fd, 'fj");
+ break;
+ case FRINT_S:
+ Format(instr, "frint.s 'fd, 'fj");
+ break;
+ case FRINT_D:
+ Format(instr, "frint.d 'fd, 'fj");
+ break;
+ case MOVFR2CF:
+ Format(instr, "movfr2cf fcc'cd, 'fj");
+ break;
+ case MOVCF2FR:
+ Format(instr, "movcf2fr 'fd, fcc'cj");
+ break;
+ case MOVGR2CF:
+ Format(instr, "movgr2cf fcc'cd, 'rj");
+ break;
+ case MOVCF2GR:
+ Format(instr, "movcf2gr 'rd, fcc'cj");
+ break;
+ case FRECIP_S:
+ Format(instr, "frecip.s 'fd, 'fj");
+ break;
+ case FRECIP_D:
+ Format(instr, "frecip.d 'fd, 'fj");
+ break;
+ case FRSQRT_S:
+ Format(instr, "frsqrt.s 'fd, 'fj");
+ break;
+ case FRSQRT_D:
+ Format(instr, "frsqrt.d 'fd, 'fj");
+ break;
+ case FCLASS_S:
+ Format(instr, "fclass.s 'fd, 'fj");
+ break;
+ case FCLASS_D:
+ Format(instr, "fclass.d 'fd, 'fj");
+ break;
+ case FLOGB_S:
+ Format(instr, "flogb.s 'fd, 'fj");
+ break;
+ case FLOGB_D:
+ Format(instr, "flogb.d 'fd, 'fj");
+ break;
+ case CLO_W:
+ Format(instr, "clo.w 'rd, 'rj");
+ break;
+ case CTO_W:
+ Format(instr, "cto.w 'rd, 'rj");
+ break;
+ case CLO_D:
+ Format(instr, "clo.d 'rd, 'rj");
+ break;
+ case CTO_D:
+ Format(instr, "cto.d 'rd, 'rj");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ", instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kOp6Type: {
+ DecodeTypekOp6(instr);
+ break;
+ }
+ case Instruction::kOp7Type: {
+ DecodeTypekOp7(instr);
+ break;
+ }
+ case Instruction::kOp8Type: {
+ DecodeTypekOp8(instr);
+ break;
+ }
+ case Instruction::kOp10Type: {
+ DecodeTypekOp10(instr);
+ break;
+ }
+ case Instruction::kOp12Type: {
+ DecodeTypekOp12(instr);
+ break;
+ }
+ case Instruction::kOp14Type: {
+ DecodeTypekOp14(instr);
+ break;
+ }
+ case Instruction::kOp17Type: {
+ return DecodeTypekOp17(instr);
+ }
+ case Instruction::kOp22Type: {
+ DecodeTypekOp22(instr);
+ break;
+ }
+ case Instruction::kUnsupported: {
+ Format(instr, "UNSUPPORTED");
+ break;
+ }
+ default: {
+ Format(instr, "UNSUPPORTED");
+ break;
+ }
+ }
+ return kInstrSize;
+}
+
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+ return tmp_buffer_.begin();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return v8::internal::FPURegisters::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE();
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code. So we will not try
+ // to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+int Disassembler::InstructionDecode(v8::base::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
+ NameConverter converter;
+ Disassembler d(converter, unimplemented_action);
+ for (byte* pc = begin; pc < end;) {
+ v8::base::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
+ }
+}
+
+#undef STRING_STARTS_WITH
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_LOONG64
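A minimal sketch, not part of the change, of driving the new LOONG64 decoder through the generic disasm interface implemented at the end of the file; the helper name and the buffer size are illustrative assumptions.

#include "src/base/vector.h"
#include "src/diagnostics/disasm.h"
#include "src/utils/utils.h"

// Hypothetical helper: disassembles and prints one instruction at `pc`.
void PrintOneLoong64Instruction(v8::internal::byte* pc) {
  disasm::NameConverter converter;
  disasm::Disassembler disassembler(converter);
  v8::base::EmbeddedVector<char, 128> buffer;
  // Every LOONG64 instruction decodes to kInstrSize (4) bytes.
  int length = disassembler.InstructionDecode(buffer, pc);
  v8::internal::PrintF("%p  %s\n", static_cast<void*>(pc), buffer.begin());
  // `length` can be used to step to the next instruction, as Disassemble() does.
  (void)length;
}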
diff --git a/chromium/v8/src/diagnostics/loong64/unwinder-loong64.cc b/chromium/v8/src/diagnostics/loong64/unwinder-loong64.cc
new file mode 100644
index 00000000000..84d2e41cfc8
--- /dev/null
+++ b/chromium/v8/src/diagnostics/loong64/unwinder-loong64.cc
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+struct RegisterState;
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/chromium/v8/src/diagnostics/mips/disasm-mips.cc b/chromium/v8/src/diagnostics/mips/disasm-mips.cc
index c5aeb274573..32a0bdb0488 100644
--- a/chromium/v8/src/diagnostics/mips/disasm-mips.cc
+++ b/chromium/v8/src/diagnostics/mips/disasm-mips.cc
@@ -555,7 +555,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -600,7 +599,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -904,7 +902,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1521,7 +1518,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -1538,7 +1534,6 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
- break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
break;
@@ -1966,7 +1961,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -1997,7 +1991,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2703,7 +2696,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/chromium/v8/src/diagnostics/mips64/disasm-mips64.cc b/chromium/v8/src/diagnostics/mips64/disasm-mips64.cc
index d8ff14730d0..0712431fc3b 100644
--- a/chromium/v8/src/diagnostics/mips64/disasm-mips64.cc
+++ b/chromium/v8/src/diagnostics/mips64/disasm-mips64.cc
@@ -596,7 +596,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -641,7 +640,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -945,7 +943,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1744,7 +1741,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1761,7 +1757,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1782,7 +1777,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2250,7 +2244,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2285,7 +2278,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2993,7 +2985,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/chromium/v8/src/diagnostics/objects-debug.cc b/chromium/v8/src/diagnostics/objects-debug.cc
index e45d7580c8f..de003a4a549 100644
--- a/chromium/v8/src/diagnostics/objects-debug.cc
+++ b/chromium/v8/src/diagnostics/objects-debug.cc
@@ -166,7 +166,9 @@ void TaggedIndex::TaggedIndexVerify(Isolate* isolate) {
}
void HeapObject::HeapObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::HeapObjectVerify(*this, isolate);
+ CHECK(IsHeapObject());
+ VerifyPointer(isolate, map(isolate));
+ CHECK(map(isolate).IsMap());
switch (map().instance_type()) {
#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
@@ -293,6 +295,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
BigIntBase::cast(*this).BigIntBaseVerify(isolate);
break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -343,8 +346,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSReceiver)
-
bool JSObject::ElementsAreSafeToExamine(PtrComprCageBase cage_base) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
@@ -419,7 +420,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
Representation r = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
@@ -654,7 +655,7 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
}
MaybeObject value = GetValue(descriptor);
HeapObject heap_object;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
CHECK_EQ(details.field_index(), expected_field_index);
CHECK(
value == MaybeObject::FromObject(FieldType::None()) ||
@@ -785,8 +786,6 @@ void JSDate::JSDateVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSMessageObject)
-
void String::StringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
@@ -830,7 +829,24 @@ void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
}
void JSFunction::JSFunctionVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
+ // Don't call TorqueGeneratedClassVerifiers::JSFunctionVerify here because the
+ // Torque class definition contains the field `prototype_or_initial_map` which
+ // may not be allocated.
+
+ // This assertion exists to encourage updating this verification function if
+ // new fields are added in the Torque class layout definition.
+ STATIC_ASSERT(JSFunction::TorqueGeneratedClass::kHeaderSize ==
+ 8 * kTaggedSize);
+
+ JSFunctionOrBoundFunctionVerify(isolate);
+ CHECK(IsJSFunction());
+ VerifyPointer(isolate, shared(isolate));
+ CHECK(shared(isolate).IsSharedFunctionInfo());
+ VerifyPointer(isolate, context(isolate, kRelaxedLoad));
+ CHECK(context(isolate, kRelaxedLoad).IsContext());
+ VerifyPointer(isolate, raw_feedback_cell(isolate));
+ CHECK(raw_feedback_cell(isolate).IsFeedbackCell());
+ VerifyPointer(isolate, raw_code(isolate));
CHECK(raw_code(isolate).IsCodeT());
CHECK(map(isolate).is_callable());
Handle<JSFunction> function(*this, isolate);
@@ -1005,8 +1021,11 @@ void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), unwinding_info_offset());
CHECK_LE(unwinding_info_offset(), MetadataSize());
+#if !defined(_MSC_VER) || defined(__clang__)
+ // See also: PlatformEmbeddedFileWriterWin::AlignToCodeAlignment.
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(InstructionStart(), kCodeAlignment));
+#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
@@ -1139,19 +1158,13 @@ void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
}
void JSFinalizationRegistry::JSFinalizationRegistryVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationRegistry());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, cleanup());
- CHECK(active_cells().IsUndefined(isolate) || active_cells().IsWeakCell());
+ TorqueGeneratedClassVerifiers::JSFinalizationRegistryVerify(*this, isolate);
if (active_cells().IsWeakCell()) {
CHECK(WeakCell::cast(active_cells()).prev().IsUndefined(isolate));
}
- CHECK(cleared_cells().IsUndefined(isolate) || cleared_cells().IsWeakCell());
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
- CHECK(next_dirty().IsUndefined(isolate) ||
- next_dirty().IsJSFinalizationRegistry());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
@@ -1236,8 +1249,9 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
}
}
}
+
void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedHashMapVerify(*this, isolate);
+ CHECK(IsSmallOrderedHashMap());
SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1250,7 +1264,7 @@ void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
}
void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedHashSetVerify(*this, isolate);
+ CHECK(IsSmallOrderedHashSet());
SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1264,8 +1278,7 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedNameDictionaryVerify(*this,
- isolate);
+ CHECK(IsSmallOrderedNameDictionary());
SmallOrderedHashTable<
SmallOrderedNameDictionary>::SmallOrderedHashTableVerify(isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1355,7 +1368,7 @@ void SwissNameDictionary::SwissNameDictionaryVerify(Isolate* isolate,
void JSRegExp::JSRegExpVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSRegExpVerify(*this, isolate);
- switch (TypeTag()) {
+ switch (type_tag()) {
case JSRegExp::ATOM: {
FixedArray arr = FixedArray::cast(data());
CHECK(arr.get(JSRegExp::kAtomPatternIndex).IsString());
@@ -1433,7 +1446,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
break;
}
default:
- CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
+ CHECK_EQ(JSRegExp::NOT_COMPILED, type_tag());
CHECK(data().IsUndefined(isolate));
break;
}
@@ -1661,9 +1674,20 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
#endif // V8_ENABLE_WEBASSEMBLY
void DataHandler::DataHandlerVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
+ // Don't call TorqueGeneratedClassVerifiers::DataHandlerVerify because the
+ // Torque definition of this class includes all of the optional fields.
+
+ // This assertion exists to encourage updating this verification function if
+ // new fields are added in the Torque class layout definition.
+ STATIC_ASSERT(DataHandler::kHeaderSize == 6 * kTaggedSize);
+
+ StructVerify(isolate);
+ CHECK(IsDataHandler());
+ VerifyPointer(isolate, smi_handler(isolate));
CHECK_IMPLIES(!smi_handler().IsSmi(),
IsStoreHandler() && smi_handler().IsCodeT());
+ VerifyPointer(isolate, validity_cell(isolate));
+ CHECK(validity_cell().IsSmi() || validity_cell().IsCell());
int data_count = data_field_count();
if (data_count >= 1) {
VerifyMaybeObjectField(isolate, kData1Offset);
diff --git a/chromium/v8/src/diagnostics/objects-printer.cc b/chromium/v8/src/diagnostics/objects-printer.cc
index 46fccedde75..8a98a152db0 100644
--- a/chromium/v8/src/diagnostics/objects-printer.cc
+++ b/chromium/v8/src/diagnostics/objects-printer.cc
@@ -234,6 +234,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case BIG_INT_BASE_TYPE:
BigIntBase::cast(*this).BigIntBasePrint(os);
break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -295,18 +296,18 @@ bool JSObject::PrintProperties(std::ostream& os) {
os << ": ";
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
os << Brief(RawFastPropertyAt(field_index));
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
os << Brief(descs.GetStrongValue(i));
break;
}
os << " ";
details.PrintAsFastTo(os, PropertyDetails::kForProperties);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
int field_index = details.field_index();
if (field_index < nof_inobject_properties) {
os << ", location: in-object";
@@ -821,10 +822,15 @@ namespace {
void PrintContextWithHeader(std::ostream& os, Context context,
const char* type) {
context.PrintHeader(os, type);
- os << "\n - length: " << context.length();
+ os << "\n - type: " << context.map().instance_type();
os << "\n - scope_info: " << Brief(context.scope_info());
os << "\n - previous: " << Brief(context.unchecked_previous());
os << "\n - native_context: " << Brief(context.native_context());
+ if (context.scope_info().HasContextExtensionSlot()) {
+ os << "\n - extension: " << context.extension();
+ }
+ os << "\n - length: " << context.length();
+ os << "\n - elements:";
PrintFixedArrayElements(os, context);
os << "\n";
}
@@ -1336,24 +1342,15 @@ void JSDate::JSDatePrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSProxy::JSProxyPrint(std::ostream& os) {
- PrintHeader(os, "JSProxy");
- os << "\n - target: ";
- target().ShortPrint(os);
- os << "\n - handler: ";
- handler().ShortPrint(os);
- os << "\n";
-}
-
void JSSet::JSSetPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSet");
- os << " - table: " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
void JSMap::JSMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSMap");
- os << " - table: " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
@@ -1373,18 +1370,6 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) {
JSCollectionIteratorPrint(os, "JSMapIterator");
}
-void WeakCell::WeakCellPrint(std::ostream& os) {
- PrintHeader(os, "WeakCell");
- os << "\n - finalization_registry: " << Brief(finalization_registry());
- os << "\n - target: " << Brief(target());
- os << "\n - holdings: " << Brief(holdings());
- os << "\n - prev: " << Brief(prev());
- os << "\n - next: " << Brief(next());
- os << "\n - unregister_token: " << Brief(unregister_token());
- os << "\n - key_list_prev: " << Brief(key_list_prev());
- os << "\n - key_list_next: " << Brief(key_list_next());
-}
-
void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakRef");
os << "\n - target: " << Brief(target());
@@ -1513,7 +1498,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {
}
os << "\n - formal_parameter_count: "
- << shared().internal_formal_parameter_count();
+ << shared().internal_formal_parameter_count_without_receiver();
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(raw_code());
@@ -1583,7 +1568,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
os << "\n - function_map_index: " << function_map_index();
- os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ os << "\n - formal_parameter_count: "
+ << internal_formal_parameter_count_without_receiver();
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data(kAcquireLoad));
@@ -1658,7 +1644,7 @@ void Code::CodePrint(std::ostream& os) {
void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
PrintHeader(os, "CodeDataContainer");
- os << "\n - kind_specific_flags: " << kind_specific_flags();
+ os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
os << "\n - code: " << Brief(code());
os << "\n - code_entry_point: "
@@ -1673,67 +1659,6 @@ void Foreign::ForeignPrint(std::ostream& os) {
os << "\n";
}
-void CallbackTask::CallbackTaskPrint(std::ostream& os) {
- PrintHeader(os, "CallbackTask");
- os << "\n - callback: " << Brief(callback());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
-void CallableTask::CallableTaskPrint(std::ostream& os) {
- PrintHeader(os, "CallableTask");
- os << "\n - context: " << Brief(context());
- os << "\n - callable: " << Brief(callable());
- os << "\n";
-}
-
-void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseFulfillReactionJobTask");
- os << "\n - argument: " << Brief(argument());
- os << "\n - context: " << Brief(context());
- os << "\n - handler: " << Brief(handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
-void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseRejectReactionJobTask");
- os << "\n - argument: " << Brief(argument());
- os << "\n - context: " << Brief(context());
- os << "\n - handler: " << Brief(handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
-void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseResolveThenableJobTask");
- os << "\n - context: " << Brief(context());
- os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
- os << "\n - then: " << Brief(then());
- os << "\n - thenable: " << Brief(thenable());
- os << "\n";
-}
-
-void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) {
- PrintHeader(os, "PromiseCapability");
- os << "\n - promise: " << Brief(promise());
- os << "\n - resolve: " << Brief(resolve());
- os << "\n - reject: " << Brief(reject());
- os << "\n";
-}
-
-void PromiseReaction::PromiseReactionPrint(std::ostream& os) {
- PrintHeader(os, "PromiseReaction");
- os << "\n - next: " << Brief(next());
- os << "\n - reject_handler: " << Brief(reject_handler());
- os << "\n - fulfill_handler: " << Brief(fulfill_handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(std::ostream& os) {
PrintHeader(os, "AsyncGeneratorRequest");
const char* mode = "Invalid!";
@@ -1754,19 +1679,6 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(std::ostream& os) {
os << "\n";
}
-void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryPrint(
- std::ostream& os) {
- PrintHeader(os, "SourceTextModuleInfoEntry");
- os << "\n - export_name: " << Brief(export_name());
- os << "\n - local_name: " << Brief(local_name());
- os << "\n - import_name: " << Brief(import_name());
- os << "\n - module_request: " << module_request();
- os << "\n - cell_index: " << cell_index();
- os << "\n - beg_pos: " << beg_pos();
- os << "\n - end_pos: " << end_pos();
- os << "\n";
-}
-
static void PrintModuleFields(Module module, std::ostream& os) {
os << "\n - exports: " << Brief(module.exports());
os << "\n - status: " << module.status();
@@ -1797,14 +1709,6 @@ void SourceTextModule::SourceTextModulePrint(std::ostream& os) {
os << "\n";
}
-void SyntheticModule::SyntheticModulePrint(std::ostream& os) {
- PrintHeader(os, "SyntheticModule");
- PrintModuleFields(*this, os);
- os << "\n - export_names: " << Brief(export_names());
- os << "\n - name: " << Brief(name());
- os << "\n";
-}
-
void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSModuleNamespace");
os << "\n - module: " << Brief(module());
@@ -1821,13 +1725,6 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {
os << "\n";
}
-void ClassPositions::ClassPositionsPrint(std::ostream& os) {
- PrintHeader(os, "ClassPositions");
- os << "\n - start position: " << start();
- os << "\n - end position: " << end();
- os << "\n";
-}
-
void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
std::ostream& os) {
PrintHeader(os, "ArrayBoilerplateDescription");
@@ -1836,15 +1733,6 @@ void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
os << "\n";
}
-void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
- std::ostream& os) {
- PrintHeader(os, "RegExpBoilerplateDescription");
- os << "\n - data: " << Brief(data());
- os << "\n - source: " << Brief(source());
- os << "\n - flags: " << flags();
- os << "\n";
-}
-
#if V8_ENABLE_WEBASSEMBLY
void AsmWasmData::AsmWasmDataPrint(std::ostream& os) {
PrintHeader(os, "AsmWasmData");
@@ -1898,10 +1786,11 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
os << Brief(base::ReadUnalignedValue<Object>(field_address));
break;
case wasm::kS128:
- case wasm::kBottom:
- case wasm::kVoid:
os << "UNIMPLEMENTED"; // TODO(7748): Implement.
break;
+ case wasm::kBottom:
+ case wasm::kVoid:
+ UNREACHABLE();
}
}
os << "\n";
@@ -1947,12 +1836,6 @@ void WasmArray::WasmArrayPrint(std::ostream& os) {
os << "\n";
}
-void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) {
- PrintHeader(os, "WasmExceptionTag");
- os << "\n - index: " << index();
- os << "\n";
-}
-
void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "WasmInstanceObject");
os << "\n - module_object: " << Brief(module_object());
@@ -1985,7 +1868,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
}
os << "\n - memory_start: " << static_cast<void*>(memory_start());
os << "\n - memory_size: " << memory_size();
- os << "\n - memory_mask: " << AsHex(memory_mask());
os << "\n - imported_function_targets: "
<< static_cast<void*>(imported_function_targets());
os << "\n - globals_start: " << static_cast<void*>(globals_start());
@@ -2045,15 +1927,6 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) {
os << "\n";
}
-void WasmTableObject::WasmTableObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmTableObject");
- os << "\n - elements: " << Brief(elements());
- os << "\n - maximum_length: " << Brief(maximum_length());
- os << "\n - dispatch_tables: " << Brief(dispatch_tables());
- os << "\n - raw_type: " << raw_type();
- os << "\n";
-}
-
void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmGlobalObject");
if (type().is_reference()) {
@@ -2069,21 +1942,6 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {
os << "\n";
}
-void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmMemoryObject");
- os << "\n - array_buffer: " << Brief(array_buffer());
- os << "\n - maximum_pages: " << maximum_pages();
- os << "\n - instances: " << Brief(instances());
- os << "\n";
-}
-
-void WasmTagObject::WasmTagObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmTagObject");
- os << "\n - serialized_signature: " << Brief(serialized_signature());
- os << "\n - tag: " << Brief(tag());
- os << "\n";
-}
-
void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
std::ostream& os) {
PrintHeader(os, "WasmIndirectFunctionTable");
@@ -2141,13 +1999,6 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) {
os << "\n";
}
-void AccessorPair::AccessorPairPrint(std::ostream& os) {
- PrintHeader(os, "AccessorPair");
- os << "\n - getter: " << Brief(getter());
- os << "\n - setter: " << Brief(setter());
- os << "\n";
-}
-
void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) {
PrintHeader(os, "CallHandlerInfo");
os << "\n - callback: " << Brief(callback());
@@ -2431,18 +2282,6 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
os << "\n";
}
-void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) {
- PrintHeader(os, "StackFrameInfo");
- os << "\n - receiver_or_instance: " << Brief(receiver_or_instance());
- os << "\n - function: " << Brief(function());
- os << "\n - code_object: " << Brief(TorqueGeneratedClass::code_object());
- os << "\n - code_offset_or_source_position: "
- << code_offset_or_source_position();
- os << "\n - flags: " << flags();
- os << "\n - parameters: " << Brief(parameters());
- os << "\n";
-}
-
void PreparseData::PreparseDataPrint(std::ostream& os) {
PrintHeader(os, "PreparseData");
os << "\n - data_length: " << data_length();
@@ -2459,13 +2298,6 @@ void PreparseData::PreparseDataPrint(std::ostream& os) {
os << "\n";
}
-void InterpreterData::InterpreterDataPrint(std::ostream& os) {
- PrintHeader(os, "InterpreterData");
- os << "\n - bytecode_array: " << Brief(bytecode_array());
- os << "\n - interpreter_trampoline: " << Brief(interpreter_trampoline());
- os << "\n";
-}
-
template <HeapObjectReferenceType kRefType, typename StorageType>
void TaggedImpl<kRefType, StorageType>::Print() {
StdoutStream os;
@@ -2659,12 +2491,12 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os,
details.PrintAsFastTo(os, mode);
os << " @ ";
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
FieldType field_type = GetFieldType(descriptor);
field_type.PrintTo(os);
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
Object value = GetStrongValue(descriptor);
os << Brief(value);
if (value.IsAccessorPair()) {
diff --git a/chromium/v8/src/diagnostics/perf-jit.h b/chromium/v8/src/diagnostics/perf-jit.h
index 746f9f7c857..47a6002b087 100644
--- a/chromium/v8/src/diagnostics/perf-jit.h
+++ b/chromium/v8/src/diagnostics/perf-jit.h
@@ -87,6 +87,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8;
+ static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
@@ -103,6 +104,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64;
+#elif V8_TARGET_ARCH_LOONG64
+ return kElfMachLOONG64;
#elif V8_TARGET_ARCH_ARM64
return kElfMachARM64;
#elif V8_TARGET_ARCH_S390X
diff --git a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
index affbc0fc8e7..7d366a6ba12 100644
--- a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -917,6 +917,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cnttzd'. 'ra, 'rs");
return;
}
+ case BRH: {
+ Format(instr, "brh 'ra, 'rs");
+ return;
+ }
+ case BRW: {
+ Format(instr, "brw 'ra, 'rs");
+ return;
+ }
+ case BRD: {
+ Format(instr, "brd 'ra, 'rs");
+ return;
+ }
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
return;
@@ -1393,13 +1405,20 @@ void Decoder::DecodeExt6(Instruction* instr) {
#undef DECODE_XX2_B_INSTRUCTIONS
}
switch (EXT6 | (instr->BitField(10, 2))) {
-#define DECODE_XX2_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: { \
- Format(instr, #name " 'Xt, 'Xb"); \
- return; \
+#define DECODE_XX2_VECTOR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Xt, 'Xb"); \
+ return; \
+ }
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECODE_XX2_VECTOR_A_INSTRUCTIONS)
+#undef DECODE_XX2_VECTOR_A_INSTRUCTIONS
+#define DECODE_XX2_SCALAR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Db"); \
+ return; \
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECODE_XX2_A_INSTRUCTIONS)
-#undef DECODE_XX2_A_INSTRUCTIONS
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECODE_XX2_SCALAR_A_INSTRUCTIONS)
+#undef DECODE_XX2_SCALAR_A_INSTRUCTIONS
}
Unknown(instr); // not used by V8
}
diff --git a/chromium/v8/src/diagnostics/ppc/eh-frame-ppc.cc b/chromium/v8/src/diagnostics/ppc/eh-frame-ppc.cc
index 148d01116df..8f7198cd05e 100644
--- a/chromium/v8/src/diagnostics/ppc/eh-frame-ppc.cc
+++ b/chromium/v8/src/diagnostics/ppc/eh-frame-ppc.cc
@@ -32,7 +32,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -47,7 +46,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index 2955612166e..ed899d9212f 100644
--- a/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -68,11 +68,15 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintVRegister(int reg);
void PrintFPUStatusRegister(int freg);
void PrintRs1(Instruction* instr);
void PrintRs2(Instruction* instr);
void PrintRd(Instruction* instr);
+ void PrintUimm(Instruction* instr);
void PrintVs1(Instruction* instr);
+ void PrintVs2(Instruction* instr);
+ void PrintVd(Instruction* instr);
void PrintFRs1(Instruction* instr);
void PrintFRs2(Instruction* instr);
void PrintFRs3(Instruction* instr);
@@ -96,10 +100,15 @@ class Decoder {
void PrintRvcImm8Addi4spn(Instruction* instr);
void PrintRvcImm11CJ(Instruction* instr);
void PrintRvcImm8B(Instruction* instr);
+ void PrintRvvVm(Instruction* instr);
void PrintAcquireRelease(Instruction* instr);
void PrintBranchOffset(Instruction* instr);
void PrintStoreOffset(Instruction* instr);
void PrintCSRReg(Instruction* instr);
+ void PrintRvvSEW(Instruction* instr);
+ void PrintRvvLMUL(Instruction* instr);
+ void PrintRvvSimm5(Instruction* instr);
+ void PrintRvvUimm5(Instruction* instr);
void PrintRoundingMode(Instruction* instr);
void PrintMemoryOrder(Instruction* instr, bool is_pred);
@@ -123,6 +132,16 @@ class Decoder {
void DecodeCJType(Instruction* instr);
void DecodeCBType(Instruction* instr);
+ void DecodeVType(Instruction* instr);
+ void DecodeRvvIVV(Instruction* instr);
+ void DecodeRvvFVV(Instruction* instr);
+ void DecodeRvvFVF(Instruction* instr);
+ void DecodeRvvIVI(Instruction* instr);
+ void DecodeRvvIVX(Instruction* instr);
+ void DecodeRvvVL(Instruction* instr);
+ void DecodeRvvVS(Instruction* instr);
+ void DecodeRvvMVV(Instruction* instr);
+ void DecodeRvvMVX(Instruction* instr);
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
@@ -137,6 +156,8 @@ class Decoder {
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
+ int switch_sew(Instruction* instr);
+ int switch_nf(Instruction* instr);
const disasm::NameConverter& converter_;
v8::base::Vector<char> out_buffer_;
int out_buffer_pos_;
@@ -164,6 +185,10 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+void Decoder::PrintVRegister(int reg) {
+ Print(v8::internal::VRegisters::Name(reg));
+}
+
void Decoder::PrintRs1(Instruction* instr) {
int reg = instr->Rs1Value();
PrintRegister(reg);
@@ -179,11 +204,26 @@ void Decoder::PrintRd(Instruction* instr) {
PrintRegister(reg);
}
-void Decoder::PrintVs1(Instruction* instr) {
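+// Print the raw rs1 field as an unsigned immediate; used by the CSR*I and
+// vsetivli encodings, which reuse the rs1 bits as a zero-extended immediate.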
+void Decoder::PrintUimm(Instruction* instr) {
int val = instr->Rs1Value();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", val);
}
+void Decoder::PrintVs1(Instruction* instr) {
+ int reg = instr->Vs1Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVs2(Instruction* instr) {
+ int reg = instr->Vs2Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVd(Instruction* instr) {
+ int reg = instr->VdValue();
+ PrintVRegister(reg);
+}
+
// Print the FPUregister name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
@@ -247,6 +287,26 @@ void Decoder::PrintStoreOffset(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintRvvSEW(Instruction* instr) {
+ const char* sew = instr->RvvSEW();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", sew);
+}
+
+void Decoder::PrintRvvLMUL(Instruction* instr) {
+ const char* lmul = instr->RvvLMUL();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", lmul);
+}
+
+void Decoder::PrintRvvSimm5(Instruction* instr) {
+ const int simm5 = instr->RvvSimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", simm5);
+}
+
+void Decoder::PrintRvvUimm5(Instruction* instr) {
+ const uint32_t uimm5 = instr->RvvUimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", uimm5);
+}
+
void Decoder::PrintImm20U(Instruction* instr) {
int32_t imm = instr->Imm20UValue();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
@@ -335,6 +395,13 @@ void Decoder::PrintRvcImm8B(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
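+// Append a " vm" marker when the instruction's vm bit is 0, i.e. execution is
+// masked by v0.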
+void Decoder::PrintRvvVm(Instruction* instr) {
+ uint8_t imm = instr->RvvVM();
+ if (imm == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " vm");
+ }
+}
+
void Decoder::PrintAcquireRelease(Instruction* instr) {
bool aq = instr->AqValue();
bool rl = instr->RlValue();
@@ -724,13 +791,50 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK(STRING_STARTS_WITH(format, "suc"));
PrintMemoryOrder(instr, false);
return 3;
+ } else if (format[1] == 'e') {
+ DCHECK(STRING_STARTS_WITH(format, "sew"));
+ PrintRvvSEW(instr);
+ return 3;
+ } else if (format[1] == 'i') {
+ DCHECK(STRING_STARTS_WITH(format, "simm5"));
+ PrintRvvSimm5(instr);
+ return 5;
}
UNREACHABLE();
}
- case 'v': { // 'vs1: Raw values from register fields
- DCHECK(STRING_STARTS_WITH(format, "vs1"));
- PrintVs1(instr);
- return 3;
+ case 'v': {
+ if (format[1] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "vd"));
+ PrintVd(instr);
+ return 2;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "vs1"));
+ PrintVs1(instr);
+ return 3;
+ } else if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "vs2"));
+ PrintVs2(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "vm"));
+ PrintRvvVm(instr);
+ return 2;
+ }
+ }
+ case 'l': {
+ DCHECK(STRING_STARTS_WITH(format, "lmul"));
+ PrintRvvLMUL(instr);
+ return 4;
+ }
+ case 'u': {
+ if (STRING_STARTS_WITH(format, "uimm5")) {
+ PrintRvvUimm5(instr);
+ return 5;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "uimm"));
+ PrintUimm(instr);
+ return 4;
+ }
}
case 't': { // 'target: target of branch instructions'
DCHECK(STRING_STARTS_WITH(format, "target"));
@@ -1308,256 +1412,265 @@ void Decoder::DecodeR4Type(Instruction* instr) {
}
void Decoder::DecodeIType(Instruction* instr) {
- switch (instr->InstructionBits() & kITypeMask) {
- case RO_JALR:
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "ret");
- else if (instr->RdValue() == zero_reg.code() && instr->Imm12Value() == 0)
- Format(instr, "jr 'rs1");
- else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "jalr 'rs1");
- else
- Format(instr, "jalr 'rd, 'imm12('rs1)'target");
- break;
- case RO_LB:
- Format(instr, "lb 'rd, 'imm12('rs1)");
- break;
- case RO_LH:
- Format(instr, "lh 'rd, 'imm12('rs1)");
- break;
- case RO_LW:
- Format(instr, "lw 'rd, 'imm12('rs1)");
- break;
- case RO_LBU:
- Format(instr, "lbu 'rd, 'imm12('rs1)");
- break;
- case RO_LHU:
- Format(instr, "lhu 'rd, 'imm12('rs1)");
- break;
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVL(instr);
+ } else {
+ switch (instr->InstructionBits() & kITypeMask) {
+ case RO_JALR:
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "ret");
+ else if (instr->RdValue() == zero_reg.code() &&
+ instr->Imm12Value() == 0)
+ Format(instr, "jr 'rs1");
+ else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "jalr 'rs1");
+ else
+ Format(instr, "jalr 'rd, 'imm12('rs1)");
+ break;
+ case RO_LB:
+ Format(instr, "lb 'rd, 'imm12('rs1)");
+ break;
+ case RO_LH:
+ Format(instr, "lh 'rd, 'imm12('rs1)");
+ break;
+ case RO_LW:
+ Format(instr, "lw 'rd, 'imm12('rs1)");
+ break;
+ case RO_LBU:
+ Format(instr, "lbu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LHU:
+ Format(instr, "lhu 'rd, 'imm12('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_LWU:
- Format(instr, "lwu 'rd, 'imm12('rs1)");
- break;
- case RO_LD:
- Format(instr, "ld 'rd, 'imm12('rs1)");
- break;
+ case RO_LWU:
+ Format(instr, "lwu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LD:
+ Format(instr, "ld 'rd, 'imm12('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_ADDI:
- if (instr->Imm12Value() == 0) {
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == zero_reg.code())
- Format(instr, "nop");
+ case RO_ADDI:
+ if (instr->Imm12Value() == 0) {
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == zero_reg.code())
+ Format(instr, "nop");
+ else
+ Format(instr, "mv 'rd, 'rs1");
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "li 'rd, 'imm12");
+ } else {
+ Format(instr, "addi 'rd, 'rs1, 'imm12");
+ }
+ break;
+ case RO_SLTI:
+ Format(instr, "slti 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLTIU:
+ if (instr->Imm12Value() == 1)
+ Format(instr, "seqz 'rd, 'rs1");
else
- Format(instr, "mv 'rd, 'rs1");
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "li 'rd, 'imm12");
- } else {
- Format(instr, "addi 'rd, 'rs1, 'imm12");
- }
- break;
- case RO_SLTI:
- Format(instr, "slti 'rd, 'rs1, 'imm12");
- break;
- case RO_SLTIU:
- if (instr->Imm12Value() == 1)
- Format(instr, "seqz 'rd, 'rs1");
- else
- Format(instr, "sltiu 'rd, 'rs1, 'imm12");
- break;
- case RO_XORI:
- if (instr->Imm12Value() == -1)
- Format(instr, "not 'rd, 'rs1");
- else
- Format(instr, "xori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ORI:
- Format(instr, "ori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ANDI:
- Format(instr, "andi 'rd, 'rs1, 'imm12x");
- break;
- case RO_SLLI:
- Format(instr, "slli 'rd, 'rs1, 's64");
- break;
- case RO_SRLI: { // RO_SRAI
- if (!instr->IsArithShift()) {
- Format(instr, "srli 'rd, 'rs1, 's64");
- } else {
- Format(instr, "srai 'rd, 'rs1, 's64");
+ Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+ break;
+ case RO_XORI:
+ if (instr->Imm12Value() == -1)
+ Format(instr, "not 'rd, 'rs1");
+ else
+ Format(instr, "xori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ORI:
+ Format(instr, "ori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ANDI:
+ Format(instr, "andi 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_SLLI:
+ Format(instr, "slli 'rd, 'rs1, 's64");
+ break;
+ case RO_SRLI: { // RO_SRAI
+ if (!instr->IsArithShift()) {
+ Format(instr, "srli 'rd, 'rs1, 's64");
+ } else {
+ Format(instr, "srai 'rd, 'rs1, 's64");
+ }
+ break;
}
- break;
- }
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_ADDIW:
- if (instr->Imm12Value() == 0)
- Format(instr, "sext.w 'rd, 'rs1");
- else
- Format(instr, "addiw 'rd, 'rs1, 'imm12");
- break;
- case RO_SLLIW:
- Format(instr, "slliw 'rd, 'rs1, 's32");
- break;
- case RO_SRLIW: { // RO_SRAIW
- if (!instr->IsArithShift()) {
- Format(instr, "srliw 'rd, 'rs1, 's32");
- } else {
- Format(instr, "sraiw 'rd, 'rs1, 's32");
+ case RO_ADDIW:
+ if (instr->Imm12Value() == 0)
+ Format(instr, "sext.w 'rd, 'rs1");
+ else
+ Format(instr, "addiw 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLLIW:
+ Format(instr, "slliw 'rd, 'rs1, 's32");
+ break;
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr->IsArithShift()) {
+ Format(instr, "srliw 'rd, 'rs1, 's32");
+ } else {
+ Format(instr, "sraiw 'rd, 'rs1, 's32");
+ }
+ break;
}
- break;
- }
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_FENCE:
- if (instr->MemoryOrder(true) == PSIORW &&
- instr->MemoryOrder(false) == PSIORW)
- Format(instr, "fence");
- else
- Format(instr, "fence 'pre, 'suc");
- break;
- case RO_ECALL: { // RO_EBREAK
- if (instr->Imm12Value() == 0) { // ECALL
- Format(instr, "ecall");
- } else if (instr->Imm12Value() == 1) { // EBREAK
- Format(instr, "ebreak");
- } else {
- UNSUPPORTED_RISCV();
+ case RO_FENCE:
+ if (instr->MemoryOrder(true) == PSIORW &&
+ instr->MemoryOrder(false) == PSIORW)
+ Format(instr, "fence");
+ else
+ Format(instr, "fence 'pre, 'suc");
+ break;
+ case RO_ECALL: { // RO_EBREAK
+ if (instr->Imm12Value() == 0) { // ECALL
+ Format(instr, "ecall");
+ } else if (instr->Imm12Value() == 1) { // EBREAK
+ Format(instr, "ebreak");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
}
- break;
- }
- // TODO(riscv): use Zifencei Standard Extension macro block
- case RO_FENCE_I:
- Format(instr, "fence.i");
- break;
- // TODO(riscv): use Zicsr Standard Extension macro block
- case RO_CSRRW:
- if (instr->CsrValue() == csr_fcsr) {
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I:
+ Format(instr, "fence.i");
+ break;
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ // FIXME(RISC-V): Add special formatting for CSR registers
+ case RO_CSRRW:
+ if (instr->CsrValue() == csr_fcsr) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fscsr 'rs1");
+ else
+ Format(instr, "fscsr 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_frm) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsrm 'rs1");
+ else
+ Format(instr, "fsrm 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_fflags) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsflags 'rs1");
+ else
+ Format(instr, "fsflags 'rd, 'rs1");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrw 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrw 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRS:
+ if (instr->Rs1Value() == zero_reg.code()) {
+ switch (instr->CsrValue()) {
+ case csr_instret:
+ Format(instr, "rdinstret 'rd");
+ break;
+ case csr_instreth:
+ Format(instr, "rdinstreth 'rd");
+ break;
+ case csr_time:
+ Format(instr, "rdtime 'rd");
+ break;
+ case csr_timeh:
+ Format(instr, "rdtimeh 'rd");
+ break;
+ case csr_cycle:
+ Format(instr, "rdcycle 'rd");
+ break;
+ case csr_cycleh:
+ Format(instr, "rdcycleh 'rd");
+ break;
+ case csr_fflags:
+ Format(instr, "frflags 'rd");
+ break;
+ case csr_frm:
+ Format(instr, "frrm 'rd");
+ break;
+ case csr_fcsr:
+ Format(instr, "frcsr 'rd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "csrr 'rd, 'csr");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrs 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrs 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRC:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fscsr 'rs1");
+ Format(instr, "csrc 'csr, 'rs1");
else
- Format(instr, "fscsr 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_frm) {
+ Format(instr, "csrrc 'rd, 'csr, 'rs1");
+ break;
+ case RO_CSRRWI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsrm 'rs1");
+ Format(instr, "csrwi 'csr, 'uimm");
else
- Format(instr, "fsrm 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_fflags) {
+ Format(instr, "csrrwi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRSI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsflags 'rs1");
+ Format(instr, "csrsi 'csr, 'uimm");
else
- Format(instr, "fsflags 'rd, 'rs1");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrw 'csr, 'rs1");
- } else {
- Format(instr, "csrrw 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRS:
- if (instr->Rs1Value() == zero_reg.code()) {
- switch (instr->CsrValue()) {
- case csr_instret:
- Format(instr, "rdinstret 'rd");
- break;
- case csr_instreth:
- Format(instr, "rdinstreth 'rd");
- break;
- case csr_time:
- Format(instr, "rdtime 'rd");
- break;
- case csr_timeh:
- Format(instr, "rdtimeh 'rd");
- break;
- case csr_cycle:
- Format(instr, "rdcycle 'rd");
- break;
- case csr_cycleh:
- Format(instr, "rdcycleh 'rd");
- break;
- case csr_fflags:
- Format(instr, "frflags 'rd");
- break;
- case csr_frm:
- Format(instr, "frrm 'rd");
- break;
- case csr_fcsr:
- Format(instr, "frcsr 'rd");
- break;
- default:
- UNREACHABLE();
- }
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "csrr 'rd, 'csr");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrs 'csr, 'rs1");
- } else {
- Format(instr, "csrrs 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRC:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrc 'csr, 'rs1");
- else
- Format(instr, "csrrc 'rd, 'csr, 'rs1");
- break;
- case RO_CSRRWI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrwi 'csr, 'vs1");
- else
- Format(instr, "csrrwi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRSI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrsi 'csr, 'vs1");
- else
- Format(instr, "csrrsi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRCI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrci 'csr, 'vs1");
- else
- Format(instr, "csrrci 'rd, 'csr, 'vs1");
- break;
- // TODO(riscv): use F Extension macro block
- case RO_FLW:
- Format(instr, "flw 'fd, 'imm12('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FLD:
- Format(instr, "fld 'fd, 'imm12('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ Format(instr, "csrrsi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRCI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrci 'csr, 'uimm");
+ else
+ Format(instr, "csrrci 'rd, 'csr, 'uimm");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW:
+ Format(instr, "flw 'fd, 'imm12('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD:
+ Format(instr, "fld 'fd, 'imm12('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
void Decoder::DecodeSType(Instruction* instr) {
- switch (instr->InstructionBits() & kSTypeMask) {
- case RO_SB:
- Format(instr, "sb 'rs2, 'offS('rs1)");
- break;
- case RO_SH:
- Format(instr, "sh 'rs2, 'offS('rs1)");
- break;
- case RO_SW:
- Format(instr, "sw 'rs2, 'offS('rs1)");
- break;
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVS(instr);
+ } else {
+ switch (instr->InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ Format(instr, "sb 'rs2, 'offS('rs1)");
+ break;
+ case RO_SH:
+ Format(instr, "sh 'rs2, 'offS('rs1)");
+ break;
+ case RO_SW:
+ Format(instr, "sw 'rs2, 'offS('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_SD:
- Format(instr, "sd 'rs2, 'offS('rs1)");
- break;
+ case RO_SD:
+ Format(instr, "sd 'rs2, 'offS('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- // TODO(riscv): use F Extension macro block
- case RO_FSW:
- Format(instr, "fsw 'fs2, 'offS('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FSD:
- Format(instr, "fsd 'fs2, 'offS('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW:
+ Format(instr, "fsw 'fs2, 'offS('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD:
+ Format(instr, "fsd 'fs2, 'offS('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
-
void Decoder::DecodeBType(Instruction* instr) {
switch (instr->InstructionBits() & kBTypeMask) {
case RO_BEQ:
@@ -1595,6 +1708,7 @@ void Decoder::DecodeUType(Instruction* instr) {
UNSUPPORTED_RISCV();
}
}
+// namespace internal

void Decoder::DecodeJType(Instruction* instr) {
// J Type doesn't have additional mask
switch (instr->BaseOpcodeValue()) {
@@ -1791,6 +1905,631 @@ void Decoder::DecodeCBType(Instruction* instr) {
}
}
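+// Disassemble RVV OP_IVV (integer vector-vector) instructions.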
+void Decoder::DecodeRvvIVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV:
+ Format(instr, "vadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSADD_VV:
+ Format(instr, "vsadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSADDU_VV:
+ Format(instr, "vsaddu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSUB_VV:
+ Format(instr, "vsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSSUB_VV:
+ Format(instr, "vssub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMIN_VV:
+ Format(instr, "vmin.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMINU_VV:
+ Format(instr, "vminu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAX_VV:
+ Format(instr, "vmax.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAXU_VV:
+ Format(instr, "vmaxu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VAND_VV:
+ Format(instr, "vand.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VOR_VV:
+ Format(instr, "vor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VXOR_VV:
+ Format(instr, "vxor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VRGATHER_VV:
+ Format(instr, "vrgather.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSEQ_VV:
+ Format(instr, "vmseq.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSNE_VV:
+ Format(instr, "vmsne.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLT_VV:
+ Format(instr, "vmslt.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLTU_VV:
+ Format(instr, "vmsltu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLE_VV:
+ Format(instr, "vmsle.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLEU_VV:
+ Format(instr, "vmsleu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMV_VV:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vv 'vd, 'vs1");
+ } else {
+ Format(instr, "vmerge.vvm 'vd, 'vs2, 'vs1, v0");
+ }
+ break;
+ case RO_V_VADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
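+// Disassemble RVV OP_IVI (integer vector-immediate) instructions; the
+// immediate operand is the 5-bit simm5/uimm5 field.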
+void Decoder::DecodeRvvIVI(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI:
+ Format(instr, "vadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSADD_VI:
+ Format(instr, "vsadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSADDU_VI:
+ Format(instr, "vsaddu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRSUB_VI:
+ Format(instr, "vrsub.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VAND_VI:
+ Format(instr, "vand.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VOR_VI:
+ Format(instr, "vor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VXOR_VI:
+ Format(instr, "vxor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRGATHER_VI:
+ Format(instr, "vrgather.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMV_VI:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vi 'vd, 'simm5");
+ } else {
+ Format(instr, "vmerge.vim 'vd, 'vs2, 'simm5, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ Format(instr, "vmseq.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSNE_VI:
+ Format(instr, "vmsne.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLEU_VI:
+ Format(instr, "vmsleu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLE_VI:
+ Format(instr, "vmsle.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGTU_VI:
+ Format(instr, "vmsgtu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGT_VI:
+ Format(instr, "vmsgt.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VI:
+ Format(instr, "vslidedown.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSRL_VI:
+ Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSLL_VI:
+ Format(instr, "vsll.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
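+// Disassemble RVV OP_IVX (integer vector-scalar) instructions; the scalar
+// operand is taken from rs1.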
+void Decoder::DecodeRvvIVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX:
+ Format(instr, "vadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSADD_VX:
+ Format(instr, "vsadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSADDU_VX:
+ Format(instr, "vsaddu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSUB_VX:
+ Format(instr, "vsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSSUB_VX:
+ Format(instr, "vssub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRSUB_VX:
+ Format(instr, "vrsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMIN_VX:
+ Format(instr, "vmin.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMINU_VX:
+ Format(instr, "vminu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAX_VX:
+ Format(instr, "vmax.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAXU_VX:
+ Format(instr, "vmaxu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VAND_VX:
+ Format(instr, "vand.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VOR_VX:
+ Format(instr, "vor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VXOR_VX:
+ Format(instr, "vxor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRGATHER_VX:
+ Format(instr, "vrgather.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMV_VX:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vx 'vd, 'rs1");
+ } else {
+ Format(instr, "vmerge.vxm 'vd, 'vs2, 'rs1, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ Format(instr, "vmseq.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSNE_VX:
+ Format(instr, "vmsne.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLT_VX:
+ Format(instr, "vmslt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLTU_VX:
+ Format(instr, "vmsltu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLE_VX:
+ Format(instr, "vmsle.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLEU_VX:
+ Format(instr, "vmsleu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGT_VX:
+ Format(instr, "vmsgt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGTU_VX:
+ Format(instr, "vmsgtu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ Format(instr, "vslidedown.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VX:
+ Format(instr, "vsll.vx 'vd, 'vs2, 'rs1");
+ break;
+ case RO_V_VSRL_VX:
+ Format(instr, "vsrl.vx 'vd, 'vs2, 'rs1");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
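+// Disassemble RVV OP_MVV instructions; vmv.x.s and the integer reductions are
+// the only forms handled here.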
+void Decoder::DecodeRvvMVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0:
+ if (instr->Vs1Value() == 0x0) {
+ Format(instr, "vmv.x.s 'rd, 'vs2");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case RO_V_VREDMAXU:
+ Format(instr, "vredmaxu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMAX:
+ Format(instr, "vredmax.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMIN:
+ Format(instr, "vredmin.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMINU:
+ Format(instr, "vredminu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
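+// Disassemble RVV OP_MVX instructions; only vmv.s.x is handled here.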
+void Decoder::DecodeRvvMVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr->Vs2Value() == 0x0) {
+ Format(instr, "vmv.s.x 'vd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
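+// Disassemble RVV OP_FVV (floating-point vector-vector) instructions,
+// including the VFUNARY0/VFUNARY1 conversion and classify groups.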
+void Decoder::DecodeRvvFVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VFUNARY0:
+ switch (instr->Vs1Value()) {
+ case VFCVT_XU_F_V:
+ Format(instr, "vfcvt.xu.f.v 'vd, 'vs2'vm");
+ break;
+ case VFCVT_X_F_V:
+ Format(instr, "vfcvt.x.f.v 'vd, 'vs2'vm");
+ break;
+ case VFNCVT_F_F_W:
+ Format(instr, "vfncvt.f.f.w 'vd, 'vs2'vm");
+ break;
+ case VFCVT_F_X_V:
+ Format(instr, "vfcvt.f.x.v 'vd, 'vs2'vm");
+ break;
+ case VFCVT_F_XU_V:
+ Format(instr, "vfcvt.f.xu.v 'vd, 'vs2'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ break;
+ case RO_V_VFUNARY1:
+ switch (instr->Vs1Value()) {
+ case VFCLASS_V:
+ Format(instr, "vfclass.v 'vd, 'vs2'vm");
+ break;
+ default:
+ break;
+ }
+ break;
+ case RO_V_VMFEQ_VV:
+ Format(instr, "vmfeq.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFNE_VV:
+ Format(instr, "vmfne.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFLT_VV:
+ Format(instr, "vmflt.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFLE_VV:
+ Format(instr, "vmfle.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMAX_VV:
+ Format(instr, "vfmax.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMIN_VV:
+ Format(instr, "vfmin.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSGNJ_VV:
+ Format(instr, "vfsgnj.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSGNJN_VV:
+ if (instr->Vs1Value() == instr->Vs2Value()) {
+ Format(instr, "vneg.vv 'vd, 'vs1'vm");
+ } else {
+ Format(instr, "vfsgnjn.vv 'vd, 'vs2, 'vs1'vm");
+ }
+ break;
+ case RO_V_VFSGNJX_VV:
+ if (instr->Vs1Value() == instr->Vs2Value()) {
+ Format(instr, "vabs.vv 'vd, 'vs1'vm");
+ } else {
+ Format(instr, "vfsgnjn.vv 'vd, 'vs2, 'vs1'vm");
+ }
+ break;
+ case RO_V_VFADD_VV:
+ Format(instr, "vfadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSUB_VV:
+ Format(instr, "vfsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFDIV_VV:
+ Format(instr, "vfdiv.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMUL_VV:
+ Format(instr, "vfmul.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
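+// Disassemble RVV OP_FVF (floating-point vector-scalar) instructions; only
+// the sign-injection forms are handled here.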
+void Decoder::DecodeRvvFVF(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVF);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VFSGNJ_VF:
+ Format(instr, "vfsgnj.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFSGNJN_VF:
+ Format(instr, "vfsgnjn.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFSGNJX_VF:
+ Format(instr, "vfsgnjn.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
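+// Dispatch RVV instructions under the OP-V major opcode on funct3, then
+// handle the vsetvli/vsetivli/vsetvl configuration instructions.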
+void Decoder::DecodeVType(Instruction* instr) {
+ switch (instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask)) {
+ case OP_IVV:
+ DecodeRvvIVV(instr);
+ return;
+ case OP_FVV:
+ DecodeRvvFVV(instr);
+ return;
+ case OP_MVV:
+ DecodeRvvMVV(instr);
+ return;
+ case OP_IVI:
+ DecodeRvvIVI(instr);
+ return;
+ case OP_IVX:
+ DecodeRvvIVX(instr);
+ return;
+ case OP_FVF:
+      DecodeRvvFVF(instr);
+ return;
+ case OP_MVX:
+ DecodeRvvMVX(instr);
+ return;
+ }
+ switch (instr->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI:
+ Format(instr, "vsetvli 'rd, 'rs1, 'sew, 'lmul");
+ break;
+ case RO_V_VSETVL:
+ if (!(instr->InstructionBits() & 0x40000000)) {
+ Format(instr, "vsetvl 'rd, 'rs1, 'rs2");
+ } else {
+ Format(instr, "vsetivli 'rd, 'uimm, 'sew, 'lmul");
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+int Decoder::switch_nf(Instruction* instr) {
+ int nf = 0;
+ switch (instr->InstructionBits() & kRvvNfMask) {
+ case 0x20000000:
+ nf = 2;
+ break;
+ case 0x40000000:
+ nf = 3;
+ break;
+ case 0x60000000:
+ nf = 4;
+ break;
+ case 0x80000000:
+ nf = 5;
+ break;
+ case 0xa0000000:
+ nf = 6;
+ break;
+ case 0xc0000000:
+ nf = 7;
+ break;
+ case 0xe0000000:
+ nf = 8;
+ break;
+ }
+ return nf;
+}
+void Decoder::DecodeRvvVL(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vle%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else {
+ snprintf(str, sizeof(str), "vle%dff.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ snprintf(str, sizeof(str), "vlse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+
+ } else if (RO_V_VLX == instr_temp) {
+ snprintf(str, sizeof(str), "vlxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vlseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ } else {
+ snprintf(str, sizeof(str), "vlseg%de%dff.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ }
+ Format(instr, str);
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlsseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
+int Decoder::switch_sew(Instruction* instr) {
+ int width = 0;
+ if ((instr->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (instr->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (instr->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+}
+
+void Decoder::DecodeRvvVS(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ snprintf(str, sizeof(str), "vse%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSS == instr_temp) {
+ snprintf(str, sizeof(str), "vsse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSX == instr_temp) {
+ snprintf(str, sizeof(str), "vsxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSU == instr_temp) {
+ snprintf(str, sizeof(str), "vsuxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vssseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
// Disassemble the instruction at *instr_ptr into the output buffer.
// All instructions are one word long, except for the simulator
// pseudo-instruction stop(msg). For that one special case, we return
@@ -1849,6 +2588,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
case Instruction::kCBType:
DecodeCBType(instr);
break;
+ case Instruction::kVType:
+ DecodeVType(instr);
+ break;
default:
Format(instr, "UNSUPPORTED");
UNSUPPORTED_RISCV();
@@ -1882,7 +2624,7 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // RISC-V does not have the concept of a byte register.
- return "nobytereg";
+ // return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
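The DecodeVType hunk above dispatches in two stages: a first switch on (kBaseOpcodeMask | kFunct3Mask) picks the RVV operand category (OP_IVV, OP_FVV, OP_MVX, ...), and anything left over is matched against the vector-configuration encodings, where bit 31 separates vsetvli from vsetvl/vsetivli and bit 30 separates the latter two. The standalone C++ sketch below illustrates the same two-level masked dispatch; the mask and opcode constants are illustrative placeholders, not the real RISC-V encodings.

// Illustrative two-level masked dispatch in the style of DecodeVType.
// All constants below are made up for the example.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kBaseOpcodeMask = 0x7f;
constexpr uint32_t kFunct3Mask = 0x7u << 12;
constexpr uint32_t OP_IVV = 0x57u | (0x0u << 12);  // integer vector-vector
constexpr uint32_t OP_FVV = 0x57u | (0x1u << 12);  // FP vector-vector
constexpr uint32_t OP_CFG = 0x57u | (0x7u << 12);  // vsetvli/vsetvl group

void DecodeVTypeSketch(uint32_t bits) {
  // First level: operand category.
  switch (bits & (kBaseOpcodeMask | kFunct3Mask)) {
    case OP_IVV:
      std::puts("integer vector-vector form");
      return;
    case OP_FVV:
      std::puts("floating-point vector-vector form");
      return;
  }
  // Second level: configuration instructions, distinguished by bits 31/30.
  if ((bits & (kBaseOpcodeMask | kFunct3Mask)) == OP_CFG) {
    if ((bits & 0x80000000u) == 0) {
      std::puts("vsetvli");
    } else if ((bits & 0x40000000u) == 0) {
      std::puts("vsetvl");
    } else {
      std::puts("vsetivli");
    }
  }
}

int main() {
  DecodeVTypeSketch(OP_FVV);                              // FP form
  DecodeVTypeSketch(OP_CFG | 0x80000000u | 0x40000000u);  // vsetivli
}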
diff --git a/chromium/v8/src/diagnostics/s390/eh-frame-s390.cc b/chromium/v8/src/diagnostics/s390/eh-frame-s390.cc
index 4f5994c8dab..6da3095e866 100644
--- a/chromium/v8/src/diagnostics/s390/eh-frame-s390.cc
+++ b/chromium/v8/src/diagnostics/s390/eh-frame-s390.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/chromium/v8/src/diagnostics/system-jit-win.cc b/chromium/v8/src/diagnostics/system-jit-win.cc
index c77c2231836..5ca36e67e6b 100644
--- a/chromium/v8/src/diagnostics/system-jit-win.cc
+++ b/chromium/v8/src/diagnostics/system-jit-win.cc
@@ -4,7 +4,11 @@
#include "src/diagnostics/system-jit-win.h"
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
diff --git a/chromium/v8/src/diagnostics/unwinder.cc b/chromium/v8/src/diagnostics/unwinder.cc
index 68ff6795954..00a5e7dbe68 100644
--- a/chromium/v8/src/diagnostics/unwinder.cc
+++ b/chromium/v8/src/diagnostics/unwinder.cc
@@ -6,7 +6,7 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
diff --git a/chromium/v8/src/diagnostics/unwinding-info-win64.h b/chromium/v8/src/diagnostics/unwinding-info-win64.h
index ca66437e00f..bb32f49e5d8 100644
--- a/chromium/v8/src/diagnostics/unwinding-info-win64.h
+++ b/chromium/v8/src/diagnostics/unwinding-info-win64.h
@@ -5,7 +5,9 @@
#ifndef V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
#define V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
-#include "include/v8.h"
+#include <vector>
+
+#include "include/v8-callbacks.h"
#include "include/v8config.h"
#include "src/common/globals.h"
diff --git a/chromium/v8/src/diagnostics/x64/disasm-x64.cc b/chromium/v8/src/diagnostics/x64/disasm-x64.cc
index 3ddb29e064b..469a6538dc0 100644
--- a/chromium/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/chromium/v8/src/diagnostics/x64/disasm-x64.cc
@@ -244,8 +244,9 @@ static const InstructionDesc cmov_instructions[16] = {
{"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
-static const char* const cmp_pseudo_op[8] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
namespace {
int8_t Imm8(const uint8_t* data) {
@@ -279,6 +280,10 @@ int64_t Imm64(const uint8_t* data) {
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
+// Forward-declare NameOfYMMRegister to keep its implementation with the
+// NameConverter methods and register name arrays at the bottom of the file.
+const char* NameOfYMMRegister(int reg);
+
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -356,6 +361,12 @@ class DisassemblerX64 {
return (checked & 4) == 0;
}
+ bool vex_256() const {
+ DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+ byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+ return (checked & 4) != 0;
+ }
+
bool vex_none() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
@@ -424,6 +435,14 @@ class DisassemblerX64 {
return converter_.NameOfXMMRegister(reg);
}
+ const char* NameOfAVXRegister(int reg) const {
+ if (vex_256()) {
+ return NameOfYMMRegister(reg);
+ } else {
+ return converter_.NameOfXMMRegister(reg);
+ }
+ }
+
const char* NameOfAddress(byte* addr) const {
return converter_.NameOfAddress(addr);
}
@@ -448,6 +467,7 @@ class DisassemblerX64 {
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintRightXMMOperand(byte* modrmp);
+ int PrintRightAVXOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandType op_order, byte* data);
int PrintImmediate(byte* data, OperandSize size);
int PrintImmediateOp(byte* data);
@@ -606,6 +626,10 @@ int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfXMMRegister);
}
+int DisassemblerX64::PrintRightAVXOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfAVXRegister);
+}
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
@@ -866,78 +890,98 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x18:
- AppendToBuffer("vbroadcastss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vbroadcastss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x98:
+ AppendToBuffer("vfmadd132p%c %s,%s,", float_size_code(),
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xA8:
+ AppendToBuffer("vfmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB8:
AppendToBuffer("vfmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x9C:
+ AppendToBuffer("vfnmadd132p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xAC:
+ AppendToBuffer("vfnmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBC:
AppendToBuffer("vfnmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
@@ -948,9 +992,9 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -962,8 +1006,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, \
notUsed3, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
@@ -972,8 +1016,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
case 0x##code: \
- AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
#undef DISASSEMBLE_AVX2_BROADCAST
@@ -986,96 +1030,96 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x08:
- AppendToBuffer("vroundps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x09:
- AppendToBuffer("vroundpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundpd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0A:
- AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0B:
- AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0E:
- AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpblendw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0F:
- AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpalignr %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightByteOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x15:
AppendToBuffer("vpextrw ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x16:
AppendToBuffer("vpextr%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x17:
AppendToBuffer("vextractps ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x20:
- AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightByteOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x21:
- AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vinsertps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x22:
AppendToBuffer("vpinsr%c %s,%s,", rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x4A: {
- AppendToBuffer("vblendvps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4B: {
- AppendToBuffer("vblendvpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvpd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4C: {
- AppendToBuffer("vpblendvb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vpblendvb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
default:
@@ -1086,95 +1130,95 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovss %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovss ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
- AppendToBuffer("vmovshdup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovshdup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x51:
- AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsqrtss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x58:
- AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vaddss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x59:
- AppendToBuffer("vmulss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmulss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5A:
- AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5B:
- AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvttps2dq %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x5C:
- AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsubss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5D:
- AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vminss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5E:
- AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vdivss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5F:
- AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmaxss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufhw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7F:
AppendToBuffer("vmovdqu ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xE6:
- AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtdq2pd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
default:
UnimplementedInstruction();
@@ -1184,93 +1228,61 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovsd %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovsd %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovsd ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
- AppendToBuffer("vmovddup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovddup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x2D:
AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- break;
- case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5A:
- AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0xF0:
- AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vlddqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshuflw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7C:
- AppendToBuffer("vhaddps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
+ AppendToBuffer("vhaddps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+#define DISASM_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(DISASM_SSE2_INSTRUCTION_LIST_SD)
+#undef DISASM_SSE2_INSTRUCTION_LIST_SD
default:
UnimplementedInstruction();
}
@@ -1387,90 +1399,90 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovups %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovups %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovups ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
if (mod == 0b11) {
- AppendToBuffer("vmovhlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x13:
AppendToBuffer("vmovlps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
if (mod == 0b11) {
- AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x17:
AppendToBuffer("vmovhps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovaps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovaps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x2E:
- AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vucomiss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x50:
AppendToBuffer("vmovmskps %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0xC2: {
- AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmpps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC6: {
- AppendToBuffer("vshufps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vshufps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
}
#define SSE_UNOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_UNOP_INSTRUCTION_LIST(SSE_UNOP_CASE)
#undef SSE_UNOP_CASE
#define SSE_BINOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_BINOP_INSTRUCTION_LIST(SSE_BINOP_CASE)
#undef SSE_BINOP_CASE
@@ -1482,92 +1494,92 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovupd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovupd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovupd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovapd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x50:
AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
+ NameOfAVXRegister(regop));
current += PrintRightOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqa %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqa %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x71:
AppendToBuffer("vps%sw %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x72:
AppendToBuffer("vps%sd %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x73:
AppendToBuffer("vps%sq %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x7E:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xC2: {
- AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmppd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC4:
- AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xC5:
AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xD7:
AppendToBuffer("vpmovmskb %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -1575,8 +1587,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#undef DECLARE_SSE_AVX_DIS_CASE
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -2799,9 +2811,9 @@ int DisassemblerX64::InstructionDecode(v8::base::Vector<char> out_buffer,
for (byte* bp = instr; bp < data; bp++) {
outp += v8::base::SNPrintF(out_buffer + outp, "%02x", *bp);
}
- // Indent instruction, leaving space for 9 bytes, i.e. 18 characters in hex.
- // 9-byte nop and rip-relative mov are (probably) the largest we emit.
- while (outp < 18) {
+ // Indent instruction, leaving space for 10 bytes, i.e. 20 characters in hex.
+ // 10-byte mov is (probably) the largest we emit.
+ while (outp < 20) {
outp += v8::base::SNPrintF(out_buffer + outp, " ");
}
@@ -2823,6 +2835,10 @@ static const char* const xmm_regs[16] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
+static const char* const ymm_regs[16] = {
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"};
+
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.begin();
@@ -2847,6 +2863,11 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
return "noxmmreg";
}
+const char* NameOfYMMRegister(int reg) {
+ if (0 <= reg && reg < 16) return ymm_regs[reg];
+ return "noymmreg";
+}
+
const char* NameConverter::NameInCode(byte* addr) const {
// X64 does not embed debug strings at the moment.
UNREACHABLE();
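The x64 hunks above funnel operand naming through the new NameOfAVXRegister helper: vex_256() reads the VEX.L bit (bit 2 of the last VEX prefix byte), and ymm names are printed for 256-bit forms, xmm names otherwise. A minimal standalone sketch of that selection follows; the free function and its prefix-byte parameter are simplified stand-ins for the DisassemblerX64 plumbing.

// Sketch of VEX.L-based register naming: L = bit 2 of the last VEX prefix
// byte selects between 128-bit (xmm) and 256-bit (ymm) register names.
#include <cstdint>
#include <cstdio>

static const char* const xmm_names[16] = {
    "xmm0", "xmm1", "xmm2",  "xmm3",  "xmm4",  "xmm5",  "xmm6",  "xmm7",
    "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
static const char* const ymm_names[16] = {
    "ymm0", "ymm1", "ymm2",  "ymm3",  "ymm4",  "ymm5",  "ymm6",  "ymm7",
    "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"};

const char* NameOfAVXRegisterSketch(uint8_t last_vex_byte, int reg) {
  const bool is_256 = (last_vex_byte & 4) != 0;  // VEX.L
  if (reg < 0 || reg > 15) return is_256 ? "noymmreg" : "noxmmreg";
  return is_256 ? ymm_names[reg] : xmm_names[reg];
}

int main() {
  std::printf("%s %s\n", NameOfAVXRegisterSketch(0x00, 1),  // xmm1
              NameOfAVXRegisterSketch(0x04, 1));            // ymm1
}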
diff --git a/chromium/v8/src/execution/OWNERS b/chromium/v8/src/execution/OWNERS
index 1a987f65e7e..921f4f742a0 100644
--- a/chromium/v8/src/execution/OWNERS
+++ b/chromium/v8/src/execution/OWNERS
@@ -1,7 +1,6 @@
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-mythria@chromium.org
delphick@chromium.org
verwaest@chromium.org
victorgomes@chromium.org
diff --git a/chromium/v8/src/execution/arguments-inl.h b/chromium/v8/src/execution/arguments-inl.h
index 0be23258371..2f69cd7adc4 100644
--- a/chromium/v8/src/execution/arguments-inl.h
+++ b/chromium/v8/src/execution/arguments-inl.h
@@ -15,6 +15,15 @@ namespace v8 {
namespace internal {
template <ArgumentsType T>
+Arguments<T>::ChangeValueScope::ChangeValueScope(Isolate* isolate,
+ Arguments* args, int index,
+ Object value)
+ : location_(args->address_of_arg_at(index)) {
+ old_value_ = handle(Object(*location_), isolate);
+ *location_ = value.ptr();
+}
+
+template <ArgumentsType T>
int Arguments<T>::smi_at(int index) const {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
diff --git a/chromium/v8/src/execution/arguments.h b/chromium/v8/src/execution/arguments.h
index 9ba80a401f7..e1cd8d8c5f8 100644
--- a/chromium/v8/src/execution/arguments.h
+++ b/chromium/v8/src/execution/arguments.h
@@ -33,6 +33,18 @@ namespace internal {
template <ArgumentsType arguments_type>
class Arguments {
public:
+ // Scope to temporarily change the value of an argument.
+ class ChangeValueScope {
+ public:
+ inline ChangeValueScope(Isolate* isolate, Arguments* args, int index,
+ Object value);
+ ~ChangeValueScope() { *location_ = old_value_->ptr(); }
+
+ private:
+ Address* location_;
+ Handle<Object> old_value_;
+ };
+
Arguments(int length, Address* arguments)
: length_(length), arguments_(arguments) {
DCHECK_GE(length_, 0);
@@ -51,10 +63,6 @@ class Arguments {
inline double number_at(int index) const;
- inline void set_at(int index, Object value) {
- *address_of_arg_at(index) = value.ptr();
- }
-
inline FullObjectSlot slot_at(int index) const {
return FullObjectSlot(address_of_arg_at(index));
}
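The arguments.h hunk above replaces the raw set_at() mutator with a ChangeValueScope that records the old slot contents and restores them from its destructor. Below is a minimal standalone sketch of the same RAII pattern, assuming plain uintptr_t slots in place of V8's tagged argument slots; the real class keeps the old value in a Handle so it stays safe across GC.

// Sketch of the ChangeValueScope idea: overwrite a slot on construction,
// restore the previous value on destruction.
#include <cassert>
#include <cstdint>

class ChangeValueScopeSketch {
 public:
  ChangeValueScopeSketch(uintptr_t* location, uintptr_t new_value)
      : location_(location), old_value_(*location) {
    *location_ = new_value;
  }
  ~ChangeValueScopeSketch() { *location_ = old_value_; }

 private:
  uintptr_t* location_;
  uintptr_t old_value_;
};

int main() {
  uintptr_t slot = 1;
  {
    ChangeValueScopeSketch scope(&slot, 2);
    assert(slot == 2);  // changed for the duration of the scope
  }
  assert(slot == 1);  // restored when the scope ends
}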
diff --git a/chromium/v8/src/execution/arm/simulator-arm.cc b/chromium/v8/src/execution/arm/simulator-arm.cc
index ec9c05af699..4ebfe6bbd62 100644
--- a/chromium/v8/src/execution/arm/simulator-arm.cc
+++ b/chromium/v8/src/execution/arm/simulator-arm.cc
@@ -114,14 +114,10 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0)
+ return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
}
bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
@@ -1192,7 +1188,6 @@ bool Simulator::ConditionallyExecute(Instruction* instr) {
default:
UNREACHABLE();
}
- return false;
}
// Calculate and set the Negative and Zero flags.
@@ -1314,7 +1309,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
UNIMPLEMENTED();
- return result;
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
@@ -1373,7 +1367,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1451,7 +1444,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -1486,7 +1478,6 @@ int32_t Simulator::ProcessPU(Instruction* instr, int num_regs, int reg_size,
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
- break;
}
case ia_x: {
*start_address = rn_val;
@@ -1717,7 +1708,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
@@ -1769,7 +1759,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1783,7 +1772,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -1847,8 +1835,19 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
#endif
} else {
// builtin call.
+      // FAST_C_CALL is temporarily handled here as well, because the
+      // simulator lacks proper support for direct C calls with FP params.
+      // The generic BUILTIN_CALL path assumes all parameters are passed in
+      // GP registers, so the slow callback can be called without crashing.
+      // This works because the mjsunit tests check
+      // `fast_c_api.supports_fp_params` (which is false on non-simulator
+      // arm/arm64 builds), so the slow path is expected to be taken, and
+      // the slow path passes the arguments as a `const
+      // FunctionCallbackInfo<Value>&` (a GP argument), so the call is
+      // made correctly.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
- redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
@@ -2121,7 +2120,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
} else {
// The instruction is documented as strex rd, rt, [rn], but the
@@ -2165,7 +2163,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -2219,7 +2216,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -2262,7 +2258,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -2600,7 +2595,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -2680,7 +2674,6 @@ void Simulator::DecodeType3(Instruction* instr) {
DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
- break;
}
case ia_x: {
if (instr->Bit(4) == 0) {
@@ -2714,10 +2707,8 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
case 1:
UNIMPLEMENTED();
- break;
case 2:
UNIMPLEMENTED();
- break;
case 3: {
// Usat.
int32_t sat_pos = instr->Bits(20, 16);
@@ -2746,7 +2737,6 @@ void Simulator::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNIMPLEMENTED();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -3442,7 +3432,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
set_neon_register(vd, q_data);
}
@@ -4433,7 +4422,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -4469,13 +4457,11 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
default:
UNREACHABLE();
- break;
}
} else if (opc1 == 0 && (opc2 == 0b0100 || opc2 == 0b0101)) {
DCHECK_EQ(1, instr->Bit(6)); // Only support Q regs.
@@ -4625,7 +4611,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
@@ -4654,7 +4639,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b10 && opc2 == 0b0001) {
@@ -4674,7 +4658,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
int Vd = instr->VFPDRegValue(kDoublePrecision);
@@ -4692,7 +4675,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
@@ -4714,7 +4696,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Qd, Qm.
@@ -4730,7 +4711,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -4747,10 +4727,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Dd, Dm.
@@ -4763,10 +4741,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -4811,7 +4787,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
case Neon64:
UNREACHABLE();
- break;
}
} else if (opc1 == 0b10 && instr->Bit(10) == 1) {
// vrint<q>.<dt> <Dd>, <Dm>
@@ -5078,7 +5053,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 1 && sz == 2 && q && op1) {
// vmov Qd, Qm.
@@ -5134,7 +5108,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 3) {
// vcge/vcgt.s<size> Qd, Qm, Qn.
@@ -5152,7 +5125,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 4 && !op1) {
// vshl s<size> Qd, Qm, Qn.
@@ -5172,7 +5144,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 6) {
// vmin/vmax.s<size> Qd, Qm, Qn.
@@ -5190,7 +5161,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && op1) {
// vtst.i<size> Qd, Qm, Qn.
@@ -5207,7 +5177,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && !op1) {
// vadd.i<size> Qd, Qm, Qn.
@@ -5241,7 +5210,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xA) {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
@@ -5259,7 +5227,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xB) {
// vpadd.i<size> Dd, Dm, Dn.
@@ -5276,7 +5243,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xD && !op1) {
float src1[4], src2[4];
@@ -5347,7 +5313,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 1 && sz == 1 && op1) {
// vbsl.size Qd, Qm, Qn.
@@ -5388,7 +5353,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 2 && op1) {
// vqsub.u<size> Qd, Qm, Qn.
@@ -5405,7 +5369,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 3) {
// vcge/vcgt.u<size> Qd, Qm, Qn.
@@ -5423,7 +5386,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 4 && !op1) {
// vshl u<size> Qd, Qm, Qn.
@@ -5443,7 +5405,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 6) {
// vmin/vmax.u<size> Qd, Qm, Qn.
@@ -5461,7 +5422,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 8 && !op1) {
// vsub.size Qd, Qm, Qn.
@@ -5495,7 +5455,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xA) {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
@@ -5513,7 +5472,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xD && sz == 0 && q && op1) {
// vmul.f32 Qd, Qn, Qm
@@ -5658,7 +5616,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
// vmovl signed
@@ -5677,7 +5634,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (!u && imm3H_L != 0 && opc == 0b0101) {
@@ -5721,7 +5677,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && imm3H_L != 0 && opc == 0b0101) {
// vsli.<size> Dd, Dm, shift
@@ -5743,7 +5698,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -5807,7 +5761,6 @@ void Simulator::DecodeAdvancedSIMDLoadStoreMultipleStructures(
break;
default:
UNIMPLEMENTED();
- break;
}
if (instr->Bit(21)) {
// vld1
@@ -5993,7 +5946,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
@@ -6019,7 +5971,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -6111,7 +6062,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
if (instr->SzValue() == 0x1) {
int n = instr->VFPNRegValue(kDoublePrecision);
@@ -6132,7 +6082,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
@@ -6201,7 +6150,6 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.cc b/chromium/v8/src/execution/arm64/simulator-arm64.cc
index 324bdd99a8d..77fd2ffbd36 100644
--- a/chromium/v8/src/execution/arm64/simulator-arm64.cc
+++ b/chromium/v8/src/execution/arm64/simulator-arm64.cc
@@ -538,6 +538,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
TraceSim("Type: Unknown.\n");
UNREACHABLE();
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on non-simulator
+ // builds for arm/arm64), thus we expect that the slow path will be
+ // called. And since the slow path passes the arguments as a `const
+ // FunctionCallbackInfo<Value>&` (which is a GP argument), the call is
+ // made correctly.
+ case ExternalReference::FAST_C_CALL:
case ExternalReference::BUILTIN_CALL:
#if defined(V8_OS_WIN)
{
@@ -1517,7 +1528,6 @@ void Simulator::VisitPCRelAddressing(Instruction* instr) {
break;
case ADRP: // Not implemented in the assembler.
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -2212,7 +2222,6 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
default:
UNREACHABLE();
}
- return TransactionSize::None;
}
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
@@ -5210,7 +5219,6 @@ void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
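The FAST_C_CALL comment added in DoRuntimeCall above explains why routing it through the generic BUILTIN_CALL path is safe in the simulator: the slow API callback takes only a pointer argument, which travels in a general-purpose register. A self-contained sketch of that fall-through dispatch; every name here is a placeholder, not the simulator's real types:

    #include <cstdio>

    enum class CallType { kBuiltinCall, kFastCCall };

    // Stand-in for the slow API callback: its only parameter is a pointer,
    // i.e. a general-purpose-register argument, so a GP-only calling
    // convention dispatches it correctly.
    void SlowCallback(const void* info) {
      std::printf("slow callback, info=%s\n", info ? "set" : "null");
    }

    void Dispatch(CallType type, const void* info) {
      switch (type) {
        case CallType::kFastCCall:   // fall through: same GP-only convention
        case CallType::kBuiltinCall:
          SlowCallback(info);
          break;
      }
    }

    int main() {
      int dummy = 0;
      Dispatch(CallType::kFastCCall, &dummy);
      Dispatch(CallType::kBuiltinCall, &dummy);
    }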
diff --git a/chromium/v8/src/execution/execution.cc b/chromium/v8/src/execution/execution.cc
index 4b7b50bb0e6..4a2095a4958 100644
--- a/chromium/v8/src/execution/execution.cc
+++ b/chromium/v8/src/execution/execution.cc
@@ -13,7 +13,8 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h" // Only for static asserts.
-#endif // V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -252,6 +253,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
DCHECK(!params.receiver->IsJSGlobalObject());
DCHECK_LE(params.argc, FixedArray::kMaxLength);
+#if V8_ENABLE_WEBASSEMBLY
+ // If we have PKU support for Wasm, ensure that code is currently write
+ // protected for this thread.
+ DCHECK_IMPLIES(wasm::GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
+ !wasm::GetWasmCodeManager()->MemoryProtectionKeyWritable());
+#endif // V8_ENABLE_WEBASSEMBLY
+
#ifdef USE_SIMULATOR
// Simulators use separate stacks for C++ and JS. JS stack overflow checks
// are performed whenever a JS function is called. However, it can be the case
@@ -346,7 +354,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
-
Handle<Code> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
@@ -374,7 +381,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
Address** argv = reinterpret_cast<Address**>(params.argv);
RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
- orig_func, func, recv, params.argc, argv));
+ orig_func, func, recv,
+ JSParameterCount(params.argc), argv));
} else {
DCHECK_EQ(Execution::Target::kRunMicrotasks, params.execution_target);
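The new DCHECK_IMPLIES above encodes an invariant: when memory protection keys (PKU) are available for Wasm code, the current thread must have switched the code pages back to non-writable before any JS is invoked. A tiny self-contained sketch of that implication check, with stand-in state instead of the real WasmCodeManager:

    #include <cassert>

    // Stand-ins for the two WasmCodeManager queries used in the DCHECK above.
    struct CodeManagerStub {
      bool has_memory_protection_key_support = true;
      bool memory_protection_key_writable = false;  // must be false when entering JS
    };

    void AssertCodeWriteProtectedOnInvoke(const CodeManagerStub& mgr) {
      // DCHECK_IMPLIES(a, b) boils down to assert(!a || b); here b is the
      // negation of "writable", i.e. the code pages are write-protected again.
      assert(!mgr.has_memory_protection_key_support ||
             !mgr.memory_protection_key_writable);
    }

    int main() {
      CodeManagerStub mgr;
      AssertCodeWriteProtectedOnInvoke(mgr);  // passes: support present, not writable
    }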
diff --git a/chromium/v8/src/execution/frame-constants.h b/chromium/v8/src/execution/frame-constants.h
index 1148a942123..d353a7092d8 100644
--- a/chromium/v8/src/execution/frame-constants.h
+++ b/chromium/v8/src/execution/frame-constants.h
@@ -283,7 +283,9 @@ class BuiltinExitFrameConstants : public ExitFrameConstants {
static constexpr int kPaddingOffset = kArgcOffset + 1 * kSystemPointerSize;
static constexpr int kFirstArgumentOffset =
kPaddingOffset + 1 * kSystemPointerSize;
- static constexpr int kNumExtraArgsWithReceiver = 5;
+ static constexpr int kNumExtraArgsWithoutReceiver = 4;
+ static constexpr int kNumExtraArgsWithReceiver =
+ kNumExtraArgsWithoutReceiver + 1;
};
// Unoptimized frames are used for interpreted and baseline-compiled JavaScript
@@ -403,6 +405,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/execution/frames.cc b/chromium/v8/src/execution/frames.cc
index f24f1837068..3691f14d865 100644
--- a/chromium/v8/src/execution/frames.cc
+++ b/chromium/v8/src/execution/frames.cc
@@ -142,7 +142,7 @@ void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
StackFrame* result = SingletonFor(type);
- DCHECK((!result) == (type == StackFrame::NONE));
+ DCHECK((!result) == (type == StackFrame::NO_FRAME_TYPE));
if (result) result->state_ = *state;
return result;
}
@@ -153,7 +153,7 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
return &field##_;
switch (type) {
- case StackFrame::NONE:
+ case StackFrame::NO_FRAME_TYPE:
return nullptr;
STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
default:
@@ -206,19 +206,42 @@ int StackTraceFrameIterator::FrameFunctionCount() const {
return static_cast<int>(infos.size());
}
-bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
+FrameSummary StackTraceFrameIterator::GetTopValidFrame() const {
+ DCHECK(!done());
+ // Like FrameSummary::GetTop, but additionally observes
+ // StackTraceFrameIterator filtering semantics.
+ std::vector<FrameSummary> frames;
+ frame()->Summarize(&frames);
+ if (is_javascript()) {
+ for (int i = static_cast<int>(frames.size()) - 1; i >= 0; i--) {
+ if (!IsValidJSFunction(*frames[i].AsJavaScript().function())) continue;
+ return frames[i];
+ }
+ UNREACHABLE();
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (is_wasm()) return frames.back();
+#endif // V8_ENABLE_WEBASSEMBLY
+ UNREACHABLE();
+}
+
+// static
+bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) {
if (frame->is_java_script()) {
- JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- if (!js_frame->function().IsJSFunction()) return false;
- return js_frame->function().shared().IsSubjectToDebugging();
+ return IsValidJSFunction(static_cast<JavaScriptFrame*>(frame)->function());
}
- // Apart from JavaScript frames, only Wasm frames are valid.
#if V8_ENABLE_WEBASSEMBLY
if (frame->is_wasm()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return false;
}
+// static
+bool StackTraceFrameIterator::IsValidJSFunction(JSFunction f) {
+ if (!f.IsJSFunction()) return false;
+ return f.shared().IsSubjectToDebugging();
+}
+
// -------------------------------------------------------------------------
namespace {
@@ -295,7 +318,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
: StackFrameIteratorBase(isolate, false),
low_bound_(sp),
high_bound_(js_entry_sp),
- top_frame_type_(StackFrame::NONE),
+ top_frame_type_(StackFrame::NO_FRAME_TYPE),
top_context_address_(kNullAddress),
external_callback_scope_(isolate->external_callback_scope()),
top_link_register_(lr) {
@@ -323,7 +346,8 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
top_frame_type_ = type;
state.fp = fast_c_fp;
state.sp = sp;
- state.pc_address = isolate->isolate_data()->fast_c_call_caller_pc_address();
+ state.pc_address = reinterpret_cast<Address*>(
+ isolate->isolate_data()->fast_c_call_caller_pc_address());
advance_frame = false;
} else if (IsValidTop(top)) {
type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
@@ -388,7 +412,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
// The frame anyways will be skipped.
type = StackFrame::OPTIMIZED;
// Top frame is incomplete so we cannot reliably determine its type.
- top_frame_type_ = StackFrame::NONE;
+ top_frame_type_ = StackFrame::NO_FRAME_TYPE;
}
} else {
return;
@@ -573,7 +597,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (wasm::WasmCode* wasm_code =
wasm::GetWasmCodeManager()->LookupCode(pc)) {
switch (wasm_code->kind()) {
- case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kWasmFunction:
return WASM;
case wasm::WasmCode::kWasmToCapiWrapper:
return WASM_EXIT;
@@ -738,7 +762,7 @@ void ExitFrame::Iterate(RootVisitor* v) const {
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
+ if (fp == 0) return NO_FRAME_TYPE;
StackFrame::Type type = ComputeFrameType(fp);
#if V8_ENABLE_WEBASSEMBLY
Address sp = type == WASM_EXIT ? WasmExitFrame::ComputeStackPointer(fp)
@@ -950,7 +974,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
has_tagged_outgoing_params =
- wasm_code->kind() != wasm::WasmCode::kFunction &&
+ wasm_code->kind() != wasm::WasmCode::kWasmFunction &&
wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
first_tagged_parameter_slot = wasm_code->first_tagged_parameter_slot();
num_tagged_parameter_slots = wasm_code->num_tagged_parameter_slots();
@@ -1035,7 +1059,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// in the place on the stack that one finds the frame type.
UNREACHABLE();
case NATIVE:
- case NONE:
+ case NO_FRAME_TYPE:
case NUMBER_OF_TYPES:
case MANUAL:
UNREACHABLE();
@@ -1154,7 +1178,8 @@ int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
} else {
return JavaScriptFrame::ComputeParametersCount();
}
@@ -1272,15 +1297,21 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
if (frame->IsConstructor()) PrintF(file, "new ");
JSFunction function = frame->function();
int code_offset = 0;
+ AbstractCode abstract_code = function.abstract_code(isolate);
if (frame->is_interpreted()) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
code_offset = iframe->GetBytecodeOffset();
+ } else if (frame->is_baseline()) {
+ // TODO(pthier): AbstractCode should fully support Baseline code.
+ BaselineFrame* baseline_frame = BaselineFrame::cast(frame);
+ code_offset = baseline_frame->GetBytecodeOffset();
+ abstract_code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
Code code = frame->unchecked_code();
code_offset = code.GetOffsetFromInstructionStart(isolate, frame->pc());
}
- PrintFunctionAndOffset(function, function.abstract_code(isolate),
- code_offset, file, print_line_number);
+ PrintFunctionAndOffset(function, abstract_code, code_offset, file,
+ print_line_number);
if (print_args) {
// function arguments
// (we are intentionally only printing the actually
@@ -1327,12 +1358,13 @@ Object CommonFrameWithJSLinkage::GetParameter(int index) const {
int CommonFrameWithJSLinkage::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function().shared().internal_formal_parameter_count();
+ return function().shared().internal_formal_parameter_count_without_receiver();
}
int JavaScriptFrame::GetActualArgumentCount() const {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
}
Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
@@ -1361,7 +1393,7 @@ int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
kJavaScriptCallArgCountRegister.code());
Object argc_object(
Memory<Address>(fp() + BuiltinContinuationFrameConstants::kArgCOffset));
- return Smi::ToInt(argc_object);
+ return Smi::ToInt(argc_object) - kJSArgcReceiverSlots;
}
intptr_t JavaScriptBuiltinContinuationFrame::GetSPToFPDelta() const {
@@ -1843,7 +1875,8 @@ JSFunction BuiltinFrame::function() const {
int BuiltinFrame::ComputeParametersCount() const {
const int offset = BuiltinFrameConstants::kLengthOffset;
- return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
+ return Smi::ToInt(Object(base::Memory<Address>(fp() + offset))) -
+ kJSArgcReceiverSlots;
}
#if V8_ENABLE_WEBASSEMBLY
@@ -2144,9 +2177,9 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->PrintName(scope_info.ContextLocalName(i));
accumulator->Add(" = ");
if (!context.is_null()) {
- int index = Context::MIN_CONTEXT_SLOTS + i;
- if (index < context.length()) {
- accumulator->Add("%o", context.get(index));
+ int slot_index = Context::MIN_CONTEXT_SLOTS + i;
+ if (slot_index < context.length()) {
+ accumulator->Add("%o", context.get(slot_index));
} else {
accumulator->Add(
"// warning: missing context slot - inconsistent frame?");
diff --git a/chromium/v8/src/execution/frames.h b/chromium/v8/src/execution/frames.h
index 8d9dadd76d2..04979509a28 100644
--- a/chromium/v8/src/execution/frames.h
+++ b/chromium/v8/src/execution/frames.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_H_
#define V8_EXECUTION_FRAMES_H_
+#include "include/v8-initialization.h"
#include "src/base/bounds.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
@@ -122,7 +123,7 @@ class StackFrame {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type {
- NONE = 0,
+ NO_FRAME_TYPE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE) NUMBER_OF_TYPES,
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
@@ -175,7 +176,9 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+#if (defined(USE_SIMULATOR) && \
+ (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)) || \
+ V8_TARGET_ARCH_RISCV64
if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
// Appease UBSan.
return Type::NUMBER_OF_TYPES;
@@ -1273,9 +1276,14 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
#endif // V8_ENABLE_WEBASSEMBLY
inline JavaScriptFrame* javascript_frame() const;
+ // Use this instead of FrameSummary::GetTop(javascript_frame) to keep
+ // filtering behavior consistent with the rest of StackTraceFrameIterator.
+ FrameSummary GetTopValidFrame() const;
+
private:
StackFrameIterator iterator_;
- bool IsValidFrame(StackFrame* frame) const;
+ static bool IsValidFrame(StackFrame* frame);
+ static bool IsValidJSFunction(JSFunction f);
};
class SafeStackFrameIterator : public StackFrameIteratorBase {
diff --git a/chromium/v8/src/execution/futex-emulation.cc b/chromium/v8/src/execution/futex-emulation.cc
index 2206b98c9f0..c1120dd8eb7 100644
--- a/chromium/v8/src/execution/futex-emulation.cc
+++ b/chromium/v8/src/execution/futex-emulation.cc
@@ -531,7 +531,8 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
Handle<JSObject> promise_capability = factory->NewJSPromise();
- enum { kNotEqual, kTimedOut, kAsync } result_kind;
+ enum class ResultKind { kNotEqual, kTimedOut, kAsync };
+ ResultKind result_kind;
{
// 16. Perform EnterCriticalSection(WL).
NoGarbageCollectionMutexGuard lock_guard(g_mutex.Pointer());
@@ -543,11 +544,11 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(
static_cast<int8_t*>(backing_store->buffer_start()) + addr);
if (p->load() != value) {
- result_kind = kNotEqual;
+ result_kind = ResultKind::kNotEqual;
} else if (use_timeout && rel_timeout_ns == 0) {
- result_kind = kTimedOut;
+ result_kind = ResultKind::kTimedOut;
} else {
- result_kind = kAsync;
+ result_kind = ResultKind::kAsync;
FutexWaitListNode* node = new FutexWaitListNode(
backing_store, addr, promise_capability, isolate);
@@ -571,7 +572,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
}
switch (result_kind) {
- case kNotEqual:
+ case ResultKind::kNotEqual:
// 18. If v is not equal to w, then
// ...
// c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
@@ -588,7 +589,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
.FromJust());
break;
- case kTimedOut:
+ case ResultKind::kTimedOut:
// 19. If t is 0 and mode is async, then
// ...
// c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
@@ -605,7 +606,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
.FromJust());
break;
- case kAsync:
+ case ResultKind::kAsync:
// Add the Promise into the NativeContext's atomics_waitasync_promises
// set, so that the list keeps it alive.
Handle<NativeContext> native_context(isolate->native_context());
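The ResultKind change above swaps an unscoped enum for an enum class. One likely motivation, an assumption rather than something stated in the patch, is that unscoped enumerators are injected into the enclosing scope and can collide with or shadow other names, while scoped enumerators stay qualified. A minimal illustration:

    #include <iostream>

    namespace demo {

    constexpr int kTimedOut = 42;  // unrelated constant in the same scope

    // With a plain `enum`, the enumerator kTimedOut below would clash with the
    // constant above; `enum class` keeps enumerators scoped to the type.
    enum class ResultKind { kNotEqual, kTimedOut, kAsync };

    void Show(ResultKind kind) {
      std::cout << (kind == ResultKind::kTimedOut) << " " << kTimedOut << "\n";
    }

    }  // namespace demo

    int main() { demo::Show(demo::ResultKind::kTimedOut); }  // prints "1 42"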
diff --git a/chromium/v8/src/execution/futex-emulation.h b/chromium/v8/src/execution/futex-emulation.h
index cf8a9fd0796..2ab84295e08 100644
--- a/chromium/v8/src/execution/futex-emulation.h
+++ b/chromium/v8/src/execution/futex-emulation.h
@@ -9,7 +9,7 @@
#include <map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -29,6 +29,8 @@
namespace v8 {
+class Promise;
+
namespace base {
class TimeDelta;
} // namespace base
diff --git a/chromium/v8/src/execution/isolate-data.h b/chromium/v8/src/execution/isolate-data.h
index 3b667e0d5a2..1fdb2e69eda 100644
--- a/chromium/v8/src/execution/isolate-data.h
+++ b/chromium/v8/src/execution/isolate-data.h
@@ -20,13 +20,48 @@ namespace internal {
class Isolate;
+// IsolateData fields, defined as: V(Offset, Size, Name)
+#define ISOLATE_DATA_FIELDS(V) \
+ /* Misc. fields. */ \
+ V(kCageBaseOffset, kSystemPointerSize, cage_base) \
+ V(kStackGuardOffset, StackGuard::kSizeInBytes, stack_guard) \
+ /* Tier 0 tables (small but fast access). */ \
+ V(kBuiltinTier0EntryTableOffset, \
+ Builtins::kBuiltinTier0Count* kSystemPointerSize, \
+ builtin_tier0_entry_table) \
+ V(kBuiltinsTier0TableOffset, \
+ Builtins::kBuiltinTier0Count* kSystemPointerSize, builtin_tier0_table) \
+ /* Misc. fields. */ \
+ V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize, \
+ embedder_data) \
+ V(kFastCCallCallerFPOffset, kSystemPointerSize, fast_c_call_caller_fp) \
+ V(kFastCCallCallerPCOffset, kSystemPointerSize, fast_c_call_caller_pc) \
+ V(kFastApiCallTargetOffset, kSystemPointerSize, fast_api_call_target) \
+ V(kLongTaskStatsCounterOffset, kSizetSize, long_task_stats_counter) \
+ /* Full tables (arbitrary size, potentially slower access). */ \
+ V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize, \
+ roots_table) \
+ V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes, \
+ external_reference_table) \
+ V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes, thread_local_top) \
+ V(kBuiltinEntryTableOffset, Builtins::kBuiltinCount* kSystemPointerSize, \
+ builtin_entry_table) \
+ V(kBuiltinTableOffset, Builtins::kBuiltinCount* kSystemPointerSize, \
+ builtin_table) \
+ ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
+ V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
+
+#ifdef V8_HEAP_SANDBOX
+#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
+ V(kExternalPointerTableOffset, kSystemPointerSize * 3, external_pointer_table)
+#else
+#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
+
// This class contains a collection of data accessible from both C++ runtime
-// and compiled code (including assembly stubs, builtins, interpreter bytecode
-// handlers and optimized code).
-// In particular, it contains pointer to the V8 heap roots table, external
-// reference table and builtins array.
-// The compiled code accesses the isolate data fields indirectly via the root
-// register.
+// and compiled code (including builtins, interpreter bytecode handlers and
+// optimized code). The compiled code accesses the isolate data fields
+// indirectly via the root register.
class IsolateData final {
public:
IsolateData(Isolate* isolate, Address cage_base)
@@ -37,158 +72,103 @@ class IsolateData final {
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
- // The value of kPointerCageBaseRegister
- Address cage_base() const {
- return COMPRESS_POINTERS_BOOL ? cage_base_ : kNullAddress;
- }
-
// The value of the kRootRegister.
Address isolate_root() const {
return reinterpret_cast<Address>(this) + kIsolateRootBias;
}
- // Root-register-relative offset of the roots table.
- static constexpr int roots_table_offset() {
- return kRootsTableOffset - kIsolateRootBias;
- }
+ // Root-register-relative offsets.
+
+#define V(Offset, Size, Name) \
+ static constexpr int Name##_offset() { return Offset - kIsolateRootBias; }
+ ISOLATE_DATA_FIELDS(V)
+#undef V
- // Root-register-relative offset of the given root table entry.
static constexpr int root_slot_offset(RootIndex root_index) {
return roots_table_offset() + RootsTable::offset_of(root_index);
}
- // Root-register-relative offset of the external reference table.
- static constexpr int external_reference_table_offset() {
- return kExternalReferenceTableOffset - kIsolateRootBias;
+ static constexpr int BuiltinEntrySlotOffset(Builtin id) {
+ DCHECK(Builtins::IsBuiltinId(id));
+ return (Builtins::IsTier0(id) ? builtin_tier0_entry_table_offset()
+ : builtin_entry_table_offset()) +
+ Builtins::ToInt(id) * kSystemPointerSize;
}
-
- // Root-register-relative offset of the builtin entry table.
- static constexpr int builtin_entry_table_offset() {
- return kBuiltinEntryTableOffset - kIsolateRootBias;
- }
- static constexpr int builtin_entry_slot_offset(Builtin builtin) {
- DCHECK(Builtins::IsBuiltinId(builtin));
- return builtin_entry_table_offset() +
- static_cast<int>(builtin) * kSystemPointerSize;
- }
-
- // Root-register-relative offset of the builtins table.
- static constexpr int builtins_table_offset() {
- return kBuiltinsTableOffset - kIsolateRootBias;
- }
-
- // Root-register-relative offset of the external pointer table.
-#ifdef V8_HEAP_SANDBOX
- static constexpr int external_pointer_table_offset() {
- return kExternalPointerTableOffset - kIsolateRootBias;
- }
-#endif
-
- static constexpr int fast_c_call_caller_fp_offset() {
- return kFastCCallCallerFPOffset - kIsolateRootBias;
- }
-
- static constexpr int fast_c_call_caller_pc_offset() {
- return kFastCCallCallerPCOffset - kIsolateRootBias;
- }
-
- static constexpr int fast_api_call_target_offset() {
- return kFastApiCallTargetOffset - kIsolateRootBias;
+ // TODO(ishell): remove in favour of typified id version.
+ static constexpr int builtin_slot_offset(int builtin_index) {
+ return BuiltinSlotOffset(Builtins::FromInt(builtin_index));
}
-
- static constexpr int cage_base_offset() {
- return kCageBaseOffset - kIsolateRootBias;
+ static constexpr int BuiltinSlotOffset(Builtin id) {
+ return (Builtins::IsTier0(id) ? builtin_tier0_table_offset()
+ : builtin_table_offset()) +
+ Builtins::ToInt(id) * kSystemPointerSize;
}
- // Root-register-relative offset of the given builtin table entry.
- // TODO(ishell): remove in favour of typified id version.
- static int builtin_slot_offset(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- return builtins_table_offset() + builtin_index * kSystemPointerSize;
- }
+#define V(Offset, Size, Name) \
+ Address Name##_address() { return reinterpret_cast<Address>(&Name##_); }
+ ISOLATE_DATA_FIELDS(V)
+#undef V
- // Root-register-relative offset of the builtin table entry.
- static int builtin_slot_offset(Builtin id) {
- return builtins_table_offset() + static_cast<int>(id) * kSystemPointerSize;
+ Address fast_c_call_caller_fp() const { return fast_c_call_caller_fp_; }
+ Address fast_c_call_caller_pc() const { return fast_c_call_caller_pc_; }
+ Address fast_api_call_target() const { return fast_api_call_target_; }
+ // The value of kPointerCageBaseRegister.
+ Address cage_base() const { return cage_base_; }
+ StackGuard* stack_guard() { return &stack_guard_; }
+ Address* builtin_tier0_entry_table() { return builtin_tier0_entry_table_; }
+ Address* builtin_tier0_table() { return builtin_tier0_table_; }
+ RootsTable& roots() { return roots_table_; }
+ const RootsTable& roots() const { return roots_table_; }
+ ExternalReferenceTable* external_reference_table() {
+ return &external_reference_table_;
}
+ ThreadLocalTop& thread_local_top() { return thread_local_top_; }
+ ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
+ Address* builtin_entry_table() { return builtin_entry_table_; }
+ Address* builtin_table() { return builtin_table_; }
+ uint8_t stack_is_iterable() const { return stack_is_iterable_; }
- // The FP and PC that are saved right before TurboAssembler::CallCFunction.
- Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
- Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
- // The address of the fast API callback right before it's executed from
- // generated code.
- Address* fast_api_call_target_address() { return &fast_api_call_target_; }
- StackGuard* stack_guard() { return &stack_guard_; }
- uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
- Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
- Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
- Address fast_api_call_target() { return fast_api_call_target_; }
- uint8_t stack_is_iterable() { return stack_is_iterable_; }
-
- // Returns true if this address points to data stored in this instance.
- // If it's the case then the value can be accessed indirectly through the
- // root register.
+ // Returns true if this address points to data stored in this instance. If
+ // it's the case then the value can be accessed indirectly through the root
+ // register.
bool contains(Address address) const {
STATIC_ASSERT(std::is_unsigned<Address>::value);
Address start = reinterpret_cast<Address>(this);
return (address - start) < sizeof(*this);
}
- ThreadLocalTop& thread_local_top() { return thread_local_top_; }
- ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
-
- RootsTable& roots() { return roots_; }
- const RootsTable& roots() const { return roots_; }
-
- ExternalReferenceTable* external_reference_table() {
- return &external_reference_table_;
- }
-
- Address* builtin_entry_table() { return builtin_entry_table_; }
- Address* builtins() { return builtins_; }
-
private:
// Static layout definition.
//
// Note: The location of fields within IsolateData is significant. The
// closer they are to the value of kRootRegister (i.e.: isolate_root()), the
// cheaper it is to access them. See also: https://crbug.com/993264.
- // The recommend guideline is to put frequently-accessed fields close to the
- // beginning of IsolateData.
-#define FIELDS(V) \
- V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
- V(kFastCCallCallerFPOffset, kSystemPointerSize) \
- V(kFastCCallCallerPCOffset, kSystemPointerSize) \
- V(kFastApiCallTargetOffset, kSystemPointerSize) \
- V(kCageBaseOffset, kSystemPointerSize) \
- V(kLongTaskStatsCounterOffset, kSizetSize) \
- V(kStackGuardOffset, StackGuard::kSizeInBytes) \
- V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
- V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
- V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes) \
- V(kBuiltinEntryTableOffset, Builtins::kBuiltinCount* kSystemPointerSize) \
- V(kBuiltinsTableOffset, Builtins::kBuiltinCount* kSystemPointerSize) \
- FIELDS_HEAP_SANDBOX(V) \
- V(kStackIsIterableOffset, kUInt8Size) \
- /* This padding aligns IsolateData size by 8 bytes. */ \
- V(kPaddingOffset, \
- 8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
- /* Total size. */ \
+ // The recommended guideline is to put frequently-accessed fields close to
+ // the beginning of IsolateData.
+#define FIELDS(V) \
+ ISOLATE_DATA_FIELDS(V) \
+ /* This padding aligns IsolateData size by 8 bytes. */ \
+ V(kPaddingOffset, \
+ 8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
+ /* Total size. */ \
V(kSize, 0)
-#ifdef V8_HEAP_SANDBOX
-#define FIELDS_HEAP_SANDBOX(V) \
- V(kExternalPointerTableOffset, kSystemPointerSize * 3)
-#else
-#define FIELDS_HEAP_SANDBOX(V)
-#endif // V8_HEAP_SANDBOX
-
DEFINE_FIELD_OFFSET_CONSTANTS(0, FIELDS)
#undef FIELDS
+ const Address cage_base_;
+
+ // Fields related to the system and JS stack. In particular, this contains
+ // the stack limit used by stack checks in generated code.
+ StackGuard stack_guard_;
+
+ // Tier 0 tables. See also builtin_entry_table_ and builtin_table_.
+ Address builtin_tier0_entry_table_[Builtins::kBuiltinTier0Count] = {};
+ Address builtin_tier0_table_[Builtins::kBuiltinTier0Count] = {};
+
// These fields are accessed through the API, offsets must be kept in sync
- // with v8::internal::Internals (in include/v8-internal.h) constants.
- // The layout consitency is verified in Isolate::CheckIsolateLayout() using
+ // with v8::internal::Internals (in include/v8-internal.h) constants. The
+ // layout consistency is verified in Isolate::CheckIsolateLayout() using
// runtime checks.
void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
@@ -196,33 +176,30 @@ class IsolateData final {
// the sampling CPU profiler can iterate the stack during such calls. These
// are stored on IsolateData so that they can be stored to with only one move
// instruction in compiled code.
+ //
+ // The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
+ // The address of the fast API callback right before it's executed from
+ // generated code.
Address fast_api_call_target_ = kNullAddress;
- Address cage_base_ = kNullAddress;
-
// Used for implementation of LongTaskStats. Counts the number of potential
// long tasks.
size_t long_task_stats_counter_ = 0;
- // Fields related to the system and JS stack. In particular, this contains
- // the stack limit used by stack checks in generated code.
- StackGuard stack_guard_;
-
- RootsTable roots_;
-
+ RootsTable roots_table_;
ExternalReferenceTable external_reference_table_;
ThreadLocalTop thread_local_top_;
- // The entry points for all builtins. This corresponds to
+ // The entry points for builtins. This corresponds to
// Code::InstructionStart() for each Code object in the builtins table below.
// The entry table is in IsolateData for easy access through kRootRegister.
Address builtin_entry_table_[Builtins::kBuiltinCount] = {};
// The entries in this array are tagged pointers to Code objects.
- Address builtins_[Builtins::kBuiltinCount] = {};
+ Address builtin_table_[Builtins::kBuiltinCount] = {};
// Table containing pointers to external objects.
#ifdef V8_HEAP_SANDBOX
@@ -259,31 +236,16 @@ void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(std::is_standard_layout<ThreadLocalTop>::value);
STATIC_ASSERT(std::is_standard_layout<ExternalReferenceTable>::value);
STATIC_ASSERT(std::is_standard_layout<IsolateData>::value);
- STATIC_ASSERT(offsetof(IsolateData, roots_) == kRootsTableOffset);
- STATIC_ASSERT(offsetof(IsolateData, external_reference_table_) ==
- kExternalReferenceTableOffset);
- STATIC_ASSERT(offsetof(IsolateData, thread_local_top_) ==
- kThreadLocalTopOffset);
- STATIC_ASSERT(offsetof(IsolateData, builtins_) == kBuiltinsTableOffset);
- STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_fp_) ==
- kFastCCallCallerFPOffset);
- STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
- kFastCCallCallerPCOffset);
- STATIC_ASSERT(offsetof(IsolateData, fast_api_call_target_) ==
- kFastApiCallTargetOffset);
- STATIC_ASSERT(offsetof(IsolateData, cage_base_) == kCageBaseOffset);
- STATIC_ASSERT(offsetof(IsolateData, long_task_stats_counter_) ==
- kLongTaskStatsCounterOffset);
- STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
-#ifdef V8_HEAP_SANDBOX
- STATIC_ASSERT(offsetof(IsolateData, external_pointer_table_) ==
- kExternalPointerTableOffset);
-#endif
- STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
- kStackIsIterableOffset);
+#define V(Offset, Size, Name) \
+ STATIC_ASSERT(offsetof(IsolateData, Name##_) == Offset);
+ ISOLATE_DATA_FIELDS(V)
+#undef V
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}
+#undef ISOLATE_DATA_FIELDS_HEAP_SANDBOX
+#undef ISOLATE_DATA_FIELDS
+
} // namespace internal
} // namespace v8
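The ISOLATE_DATA_FIELDS list above is an X-macro: a single V(Offset, Size, Name) list expands into the offset constants, the address accessors, and the layout STATIC_ASSERTs in AssertPredictableLayout, so the three can never drift apart. A compilable miniature of the same pattern, with made-up fields:

    #include <cstddef>

    constexpr int kPtrSize = static_cast<int>(sizeof(void*));

    // One list describes every field: V(Offset, Size, Name).
    #define DEMO_FIELDS(V)           \
      V(kFooOffset, kPtrSize, foo)   \
      V(kBarOffset, kPtrSize, bar)

    struct DemoData {
      // Expansion 1: sequential offset constants via the enum trick.
    #define V(Offset, Size, Name) Offset, Offset##End = Offset + (Size)-1,
      enum : int { kStartMinusOne = -1, DEMO_FIELDS(V) kSize };
    #undef V

      // Expansion 2: one address accessor per field.
    #define V(Offset, Size, Name) \
      void** Name##_address() { return &Name##_; }
      DEMO_FIELDS(V)
    #undef V

      void* foo_ = nullptr;
      void* bar_ = nullptr;
    };

    // Expansion 3: layout checks, generated from the very same list.
    #define V(Offset, Size, Name) \
      static_assert(offsetof(DemoData, Name##_) == DemoData::Offset, "layout drift");
    DEMO_FIELDS(V)
    #undef V

    int main() { DemoData d; return d.foo_address() == &d.foo_ ? 0 : 1; }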
diff --git a/chromium/v8/src/execution/isolate.cc b/chromium/v8/src/execution/isolate.cc
index 8363c52c491..801cb8b1322 100644
--- a/chromium/v8/src/execution/isolate.cc
+++ b/chromium/v8/src/execution/isolate.cc
@@ -14,6 +14,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
@@ -51,6 +52,7 @@
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
@@ -73,6 +75,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/prototype.h"
@@ -151,26 +154,6 @@ uint32_t DefaultEmbeddedBlobDataSize() {
return v8_Default_embedded_blob_data_size_;
}
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* TrustedEmbeddedBlobCode() {
- return v8_Trusted_embedded_blob_code_;
-}
-uint32_t TrustedEmbeddedBlobCodeSize() {
- return v8_Trusted_embedded_blob_code_size_;
-}
-const uint8_t* TrustedEmbeddedBlobData() {
- return v8_Trusted_embedded_blob_data_;
-}
-uint32_t TrustedEmbeddedBlobDataSize() {
- return v8_Trusted_embedded_blob_data_size_;
-}
-#endif
-
namespace {
// These variables provide access to the current embedded blob without requiring
// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
@@ -282,9 +265,6 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
const uint8_t* code =
current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed);
if (code == nullptr) return false;
-#ifdef V8_MULTI_SNAPSHOTS
- if (code == TrustedEmbeddedBlobCode()) return true;
-#endif
return code == DefaultEmbeddedBlobCode();
}
@@ -660,7 +640,8 @@ class StackTraceBuilder {
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
parameters = isolate_->factory()->CopyFixedArrayUpTo(
handle(generator_object->parameters_and_registers(), isolate_),
- function->shared().internal_formal_parameter_count());
+ function->shared()
+ .internal_formal_parameter_count_without_receiver());
}
AppendFrame(receiver, function, code, offset, flags, parameters);
@@ -703,7 +684,7 @@ class StackTraceBuilder {
#if V8_ENABLE_WEBASSEMBLY
void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
- if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
+ if (summary.code()->kind() != wasm::WasmCode::kWasmFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = StackFrameInfo::kIsWasm;
if (instance->module_object().is_asm_js()) {
@@ -851,6 +832,7 @@ class StackTraceBuilder {
};
bool GetStackTraceLimit(Isolate* isolate, int* result) {
+ DCHECK(!FLAG_correctness_fuzzer_suppressions);
Handle<JSObject> error = isolate->error_function();
Handle<String> key = isolate->factory()->stackTraceLimit_string();
@@ -897,7 +879,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
Builtin::kAsyncGeneratorAwaitResolveClosure) ||
IsBuiltinFunction(isolate, reaction->fulfill_handler(),
Builtin::kAsyncGeneratorYieldResolveClosure)) {
- // Now peak into the handlers' AwaitContext to get to
+ // Now peek into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
JSFunction::cast(reaction->fulfill_handler()).context(), isolate);
@@ -1095,7 +1077,7 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
Builtin::kAsyncFunctionAwaitRejectClosure) ||
IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
Builtin::kAsyncGeneratorAwaitRejectClosure)) {
- // Now peak into the handlers' AwaitContext to get to
+ // Now peek into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
JSFunction::cast(promise_reaction_job_task->handler()).context(),
@@ -1150,7 +1132,10 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameSkipMode mode,
Handle<Object> caller) {
int limit;
- if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();
+ if (FLAG_correctness_fuzzer_suppressions ||
+ !GetStackTraceLimit(this, &limit)) {
+ return factory()->undefined_value();
+ }
CaptureStackTraceOptions options;
options.limit = limit;
@@ -1548,7 +1533,7 @@ void ReportBootstrappingException(Handle<Object> exception,
PrintF(" <not available>\n");
} else {
PrintF("\n");
- int line_number = 1;
+ line_number = 1;
PrintF("%5d: ", line_number);
for (int i = 0; i < len; i++) {
uint16_t character = src->Get(i);
@@ -1585,7 +1570,9 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
// print a user-friendly stack trace (not an internal one).
PrintF(stderr, "%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
- PrintCurrentStackTrace(stderr);
+ std::ostringstream stack_trace_stream;
+ PrintCurrentStackTrace(stack_trace_stream);
+ PrintF(stderr, "%s", stack_trace_stream.str().c_str());
base::OS::Abort();
}
}
@@ -2146,7 +2133,7 @@ Object Isolate::PromoteScheduledException() {
return ReThrow(thrown);
}
-void Isolate::PrintCurrentStackTrace(FILE* out) {
+void Isolate::PrintCurrentStackTrace(std::ostream& out) {
CaptureStackTraceOptions options;
options.limit = 0;
options.skip_mode = SKIP_NONE;
@@ -2171,20 +2158,16 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
- CommonFrame* frame = it.frame();
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
- std::vector<FrameSummary> frames;
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
#endif // V8_ENABLE_WEBASSEMBLY
- frame->Summarize(&frames);
- FrameSummary& summary = frames.back();
+ FrameSummary summary = it.GetTopValidFrame();
Handle<SharedFunctionInfo> shared;
Handle<Object> script = summary.script();
- if (!script->IsScript() ||
- (Script::cast(*script).source().IsUndefined(this))) {
+ if (!script->IsScript() || Script::cast(*script).source().IsUndefined(this)) {
return false;
}
@@ -2466,8 +2449,7 @@ bool PromiseHasUserDefinedRejectHandlerInternal(Isolate* isolate,
Handle<PromiseCapability>::cast(promise_or_capability)->promise(),
isolate);
}
- Handle<JSPromise> promise =
- Handle<JSPromise>::cast(promise_or_capability);
+ promise = Handle<JSPromise>::cast(promise_or_capability);
if (!reaction->reject_handler().IsUndefined(isolate)) {
Handle<JSReceiver> reject_handler(
JSReceiver::cast(reaction->reject_handler()), isolate);
@@ -2639,6 +2621,20 @@ bool Isolate::AreWasmExceptionsEnabled(Handle<Context> context) {
#endif // V8_ENABLE_WEBASSEMBLY
}
+bool Isolate::IsWasmDynamicTieringEnabled() {
+#if V8_ENABLE_WEBASSEMBLY
+ if (wasm_dynamic_tiering_enabled_callback()) {
+ HandleScope handle_scope(this);
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(handle(context(), this));
+ return wasm_dynamic_tiering_enabled_callback()(api_context);
+ }
+ return FLAG_wasm_dynamic_tiering;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
@@ -2648,7 +2644,7 @@ Handle<Context> Isolate::GetIncumbentContext() {
// NOTE: This code assumes that the stack grows downward.
Address top_backup_incumbent =
top_backup_incumbent_scope()
- ? top_backup_incumbent_scope()->JSStackComparableAddress()
+ ? top_backup_incumbent_scope()->JSStackComparableAddressPrivate()
: 0;
if (!it.done() &&
(!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
@@ -2698,10 +2694,10 @@ void Isolate::ReleaseSharedPtrs() {
}
}
-bool Isolate::IsBuiltinsTableHandleLocation(Address* handle_location) {
+bool Isolate::IsBuiltinTableHandleLocation(Address* handle_location) {
FullObjectSlot location(handle_location);
- FullObjectSlot first_root(builtins_table());
- FullObjectSlot last_root(builtins_table() + Builtins::kBuiltinCount);
+ FullObjectSlot first_root(builtin_table());
+ FullObjectSlot last_root(builtin_table() + Builtins::kBuiltinCount);
if (location >= last_root) return false;
if (location < first_root) return false;
return true;
@@ -3072,9 +3068,15 @@ void Isolate::CheckIsolateLayout() {
Internals::kIsolateLongTaskStatsCounterOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
Internals::kIsolateStackGuardOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_table_)),
Internals::kIsolateRootsOffset);
+ STATIC_ASSERT(Internals::kStackGuardSize == sizeof(StackGuard));
+ STATIC_ASSERT(Internals::kBuiltinTier0TableSize ==
+ Builtins::kBuiltinTier0Count * kSystemPointerSize);
+ STATIC_ASSERT(Internals::kBuiltinTier0EntryTableSize ==
+ Builtins::kBuiltinTier0Count * kSystemPointerSize);
+
#ifdef V8_HEAP_SANDBOX
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
Internals::kExternalPointerTableBufferOffset);
@@ -3138,8 +3140,6 @@ void Isolate::Deinit() {
// All client isolates should already be detached.
DCHECK_NULL(client_isolate_head_);
- DumpAndResetStats();
-
if (FLAG_print_deopt_stress) {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
}
@@ -3155,6 +3155,11 @@ void Isolate::Deinit() {
// not cause a GC.
heap_.StartTearDown();
+ // This stops cancelable tasks (i.e. concurrent marking tasks).
+ // Stop concurrent tasks before destroying resources since they might still
+ // use those.
+ cancelable_task_manager()->CancelAndWait();
+
ReleaseSharedPtrs();
string_table_.reset();
@@ -3176,8 +3181,9 @@ void Isolate::Deinit() {
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
- // This stops cancelable tasks (i.e. concurrent marking tasks)
- cancelable_task_manager()->CancelAndWait();
+ // After all concurrent tasks are stopped, we know for sure that stats aren't
+ // updated anymore.
+ DumpAndResetStats();
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
@@ -3412,15 +3418,6 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
const uint8_t* data = DefaultEmbeddedBlobData();
uint32_t data_size = DefaultEmbeddedBlobDataSize();
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- code = TrustedEmbeddedBlobCode();
- code_size = TrustedEmbeddedBlobCodeSize();
- data = TrustedEmbeddedBlobData();
- data_size = TrustedEmbeddedBlobDataSize();
- }
-#endif
-
if (StickyEmbeddedBlobCode() != nullptr) {
base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
// Check again now that we hold the lock.
@@ -3626,7 +3623,6 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
store_stub_cache_ = new StubCache(this);
materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
- regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
@@ -3756,7 +3752,6 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// If we are deserializing, read the state into the now-empty heap.
{
- AlwaysAllocateScope always_allocate(heap());
CodeSpaceMemoryModificationScope modification_scope(heap());
if (create_heap_objects) {
@@ -3782,7 +3777,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
delete setup_delegate_;
setup_delegate_ = nullptr;
- Builtins::InitializeBuiltinEntryTable(this);
+ Builtins::InitializeIsolateDataTables(this);
Builtins::EmitCodeCreateEvents(this);
#ifdef DEBUG
@@ -4292,13 +4287,8 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
MaybeHandle<Object> maybe_import_assertions_argument) {
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>(native_context()));
- DCHECK(host_import_module_dynamically_callback_ == nullptr ||
- host_import_module_dynamically_with_import_assertions_callback_ ==
- nullptr);
-
- if (host_import_module_dynamically_callback_ == nullptr &&
- host_import_module_dynamically_with_import_assertions_callback_ ==
- nullptr) {
+ if (host_import_module_dynamically_with_import_assertions_callback_ ==
+ nullptr) {
Handle<Object> exception =
factory()->NewError(error_function(), MessageTemplate::kUnsupported);
return NewRejectedPromise(this, api_context, exception);
@@ -4309,41 +4299,26 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
if (!maybe_specifier.ToHandle(&specifier_str)) {
Handle<Object> exception(pending_exception(), this);
clear_pending_exception();
-
return NewRejectedPromise(this, api_context, exception);
}
DCHECK(!has_pending_exception());
v8::Local<v8::Promise> promise;
-
- if (host_import_module_dynamically_with_import_assertions_callback_) {
- Handle<FixedArray> import_assertions_array;
- if (GetImportAssertionsFromArgument(maybe_import_assertions_argument)
- .ToHandle(&import_assertions_array)) {
- ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
- this, promise,
- host_import_module_dynamically_with_import_assertions_callback_(
- api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
- v8::Utils::ToLocal(specifier_str),
- ToApiHandle<v8::FixedArray>(import_assertions_array)),
- MaybeHandle<JSPromise>());
- return v8::Utils::OpenHandle(*promise);
- } else {
- Handle<Object> exception(pending_exception(), this);
- clear_pending_exception();
-
- return NewRejectedPromise(this, api_context, exception);
- }
-
- } else {
- DCHECK_NOT_NULL(host_import_module_dynamically_callback_);
+ Handle<FixedArray> import_assertions_array;
+ if (GetImportAssertionsFromArgument(maybe_import_assertions_argument)
+ .ToHandle(&import_assertions_array)) {
ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
this, promise,
- host_import_module_dynamically_callback_(
+ host_import_module_dynamically_with_import_assertions_callback_(
api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
- v8::Utils::ToLocal(specifier_str)),
+ v8::Utils::ToLocal(specifier_str),
+ ToApiHandle<v8::FixedArray>(import_assertions_array)),
MaybeHandle<JSPromise>());
return v8::Utils::OpenHandle(*promise);
+ } else {
+ Handle<Object> exception(pending_exception(), this);
+ clear_pending_exception();
+ return NewRejectedPromise(this, api_context, exception);
}
}
@@ -4435,11 +4410,6 @@ MaybeHandle<FixedArray> Isolate::GetImportAssertionsFromArgument(
void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
void Isolate::SetHostImportModuleDynamicallyCallback(
- DeprecatedHostImportModuleDynamicallyCallback callback) {
- host_import_module_dynamically_callback_ = callback;
-}
-
-void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback) {
host_import_module_dynamically_with_import_assertions_callback_ = callback;
}
@@ -4606,7 +4576,7 @@ void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
debug::kDebugDidHandle, promise->async_task_id(), false);
break;
case PromiseHookType::kInit:
- debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
+ debug::DebugAsyncActionType action_type = debug::kDebugPromiseThen;
bool last_frame_was_promise_builtin = false;
JavaScriptFrameIterator it(this);
while (!it.done()) {
@@ -4622,21 +4592,22 @@ void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
promise->set_async_task_id(++async_task_count_);
}
async_event_delegate_->AsyncEventOccurred(
- type, promise->async_task_id(), debug()->IsBlackboxed(info));
+ action_type, promise->async_task_id(),
+ debug()->IsBlackboxed(info));
}
return;
}
last_frame_was_promise_builtin = false;
if (info->HasBuiltinId()) {
if (info->builtin_id() == Builtin::kPromisePrototypeThen) {
- type = debug::kDebugPromiseThen;
+ action_type = debug::kDebugPromiseThen;
last_frame_was_promise_builtin = true;
} else if (info->builtin_id() == Builtin::kPromisePrototypeCatch) {
- type = debug::kDebugPromiseCatch;
+ action_type = debug::kDebugPromiseCatch;
last_frame_was_promise_builtin = true;
} else if (info->builtin_id() ==
Builtin::kPromisePrototypeFinally) {
- type = debug::kDebugPromiseFinally;
+ action_type = debug::kDebugPromiseFinally;
last_frame_was_promise_builtin = true;
}
}
@@ -4760,6 +4731,24 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
}
+void Isolate::DetachGlobal(Handle<Context> env) {
+ counters()->errors_thrown_per_context()->AddSample(
+ env->native_context().GetErrorsThrown());
+
+ ReadOnlyRoots roots(this);
+ Handle<JSGlobalProxy> global_proxy(env->global_proxy(), this);
+ global_proxy->set_native_context(roots.null_value());
+ // NOTE: Turbofan's JSNativeContextSpecialization depends on DetachGlobal
+ // causing a map change.
+ JSObject::ForceSetPrototype(this, global_proxy, factory()->null_value());
+ global_proxy->map().set_constructor_or_back_pointer(roots.null_value(),
+ kRelaxedStore);
+ if (FLAG_track_detached_contexts) AddDetachedContext(env);
+ DCHECK(global_proxy->IsDetached());
+
+ env->native_context().set_microtask_queue(this, nullptr);
+}
+
double Isolate::LoadStartTimeMs() {
base::MutexGuard guard(&rail_mutex_);
return load_start_time_ms_;
@@ -4837,48 +4826,47 @@ void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
}
#ifdef V8_INTL_SUPPORT
+
namespace {
-std::string GetStringFromLocale(Handle<Object> locales_obj) {
- DCHECK(locales_obj->IsString() || locales_obj->IsUndefined());
- if (locales_obj->IsString()) {
- return std::string(String::cast(*locales_obj).ToCString().get());
- }
- return "";
+std::string GetStringFromLocales(Isolate* isolate, Handle<Object> locales) {
+ if (locales->IsUndefined(isolate)) return "";
+ return std::string(String::cast(*locales).ToCString().get());
}
-} // namespace
-icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type,
- Handle<Object> locales_obj) {
- std::string locale = GetStringFromLocale(locales_obj);
- auto value = icu_object_cache_.find(cache_type);
- if (value == icu_object_cache_.end()) return nullptr;
+bool StringEqualsLocales(Isolate* isolate, const std::string& str,
+ Handle<Object> locales) {
+ if (locales->IsUndefined(isolate)) return str == "";
+ return Handle<String>::cast(locales)->IsEqualTo(
+ base::VectorOf(str.c_str(), str.length()));
+}
- ICUCachePair pair = value->second;
- if (pair.first != locale) return nullptr;
+} // namespace
- return pair.second.get();
+icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type,
+ Handle<Object> locales) {
+ const ICUObjectCacheEntry& entry =
+ icu_object_cache_[static_cast<int>(cache_type)];
+ return StringEqualsLocales(this, entry.locales, locales) ? entry.obj.get()
+ : nullptr;
}
-void Isolate::set_icu_object_in_cache(
- ICUObjectCacheType cache_type, Handle<Object> locales_obj,
- std::shared_ptr<icu::UMemory> icu_formatter) {
- std::string locale = GetStringFromLocale(locales_obj);
- ICUCachePair pair = std::make_pair(locale, icu_formatter);
-
- auto it = icu_object_cache_.find(cache_type);
- if (it == icu_object_cache_.end()) {
- icu_object_cache_.insert({cache_type, pair});
- } else {
- it->second = pair;
- }
+void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
+ Handle<Object> locales,
+ std::shared_ptr<icu::UMemory> obj) {
+ icu_object_cache_[static_cast<int>(cache_type)] = {
+ GetStringFromLocales(this, locales), std::move(obj)};
}
void Isolate::clear_cached_icu_object(ICUObjectCacheType cache_type) {
- icu_object_cache_.erase(cache_type);
+ icu_object_cache_[static_cast<int>(cache_type)] = ICUObjectCacheEntry{};
}
-void Isolate::ClearCachedIcuObjects() { icu_object_cache_.clear(); }
+void Isolate::clear_cached_icu_objects() {
+ for (int i = 0; i < kICUObjectCacheTypeCount; i++) {
+ clear_cached_icu_object(static_cast<ICUObjectCacheType>(i));
+ }
+}
#endif // V8_INTL_SUPPORT
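The ICU object cache refactor above replaces an unordered_map keyed by the cache-type enum with a fixed-size array indexed by it, where each slot remembers the locales string it was built for and a lookup only hits when the requested locales match. A self-contained sketch of that shape, with std::string standing in for icu::UMemory:

    #include <array>
    #include <iostream>
    #include <memory>
    #include <string>

    enum class CacheType { kCollator, kNumberFormat, kCount };
    constexpr int kCacheTypeCount = static_cast<int>(CacheType::kCount);

    struct Entry {
      std::string locales;
      std::shared_ptr<std::string> obj;  // stand-in for icu::UMemory
    };

    class MiniCache {
     public:
      std::string* Get(CacheType type, const std::string& locales) {
        Entry& e = entries_[static_cast<int>(type)];
        return e.locales == locales ? e.obj.get() : nullptr;
      }
      void Set(CacheType type, std::string locales, std::shared_ptr<std::string> obj) {
        entries_[static_cast<int>(type)] = {std::move(locales), std::move(obj)};
      }
     private:
      std::array<Entry, kCacheTypeCount> entries_;
    };

    int main() {
      MiniCache cache;
      cache.Set(CacheType::kCollator, "en-US", std::make_shared<std::string>("collator"));
      std::cout << (cache.Get(CacheType::kCollator, "en-US") != nullptr) << "\n";  // 1: hit
      std::cout << (cache.Get(CacheType::kCollator, "de-DE") != nullptr) << "\n";  // 0: miss
    }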
diff --git a/chromium/v8/src/execution/isolate.h b/chromium/v8/src/execution/isolate.h
index e543c727186..2edc34a3e62 100644
--- a/chromium/v8/src/execution/isolate.h
+++ b/chromium/v8/src/execution/isolate.h
@@ -13,9 +13,11 @@
#include <unordered_map>
#include <vector>
+#include "include/v8-context.h"
#include "include/v8-internal.h"
+#include "include/v8-isolate.h"
#include "include/v8-metrics.h"
-#include "include/v8.h"
+#include "include/v8-snapshot.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/builtins.h"
@@ -33,6 +35,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
+#include "src/init/vm-cage.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
@@ -91,6 +94,7 @@ class EternalHandles;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
+class GlobalHandles;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
@@ -443,6 +447,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
+ V(WasmDynamicTieringEnabledCallback, wasm_dynamic_tiering_enabled_callback, \
+ nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -711,6 +717,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsWasmSimdEnabled(Handle<Context> context);
bool AreWasmExceptionsEnabled(Handle<Context> context);
+ bool IsWasmDynamicTieringEnabled();
THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
@@ -849,7 +856,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AbortOnUncaughtExceptionCallback callback);
enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
- void PrintCurrentStackTrace(FILE* out);
+ void PrintCurrentStackTrace(std::ostream& out);
void PrintStack(StringStream* accumulator,
PrintStackMode mode = kPrintStackVerbose);
void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
@@ -1072,6 +1079,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return isolate_data()->cage_base();
}
+ Address code_cage_base() const { return cage_base(); }
+
// When pointer compression is on, the PtrComprCage used by this
// Isolate. Otherwise nullptr.
VirtualMemoryCage* GetPtrComprCage() {
@@ -1119,9 +1128,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
- V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }
+ V8_INLINE Address* builtin_table() { return isolate_data_.builtin_table(); }
- bool IsBuiltinsTableHandleLocation(Address* handle_location);
+ bool IsBuiltinTableHandleLocation(Address* handle_location);
StubCache* load_stub_cache() const { return load_stub_cache_; }
StubCache* store_stub_cache() const { return store_stub_cache_; }
@@ -1346,18 +1355,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
default_locale_ = locale;
}
- // enum to access the icu object cache.
enum class ICUObjectCacheType{
kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate};
+ static constexpr int kICUObjectCacheTypeCount = 5;
icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type,
Handle<Object> locales);
void set_icu_object_in_cache(ICUObjectCacheType cache_type,
- Handle<Object> locale,
+ Handle<Object> locales,
std::shared_ptr<icu::UMemory> obj);
void clear_cached_icu_object(ICUObjectCacheType cache_type);
- void ClearCachedIcuObjects();
+ void clear_cached_icu_objects();
#endif // V8_INTL_SUPPORT
@@ -1556,6 +1565,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
+ // Detach the environment from its outer global object.
+ void DetachGlobal(Handle<Context> env);
+
std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }
bool IsGeneratingEmbeddedBuiltins() const {
@@ -1640,18 +1652,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void ClearKeptObjects();
- // While deprecating v8::HostImportModuleDynamicallyCallback in v8.h we still
- // need to support the version of the API that uses it, but we can't directly
- // reference the deprecated version because of the enusing build warnings. So,
- // we declare this matching type for temporary internal use.
- // TODO(v8:10958) Delete this declaration and all references to it once
- // v8::HostImportModuleDynamicallyCallback is removed.
- typedef MaybeLocal<Promise> (*DeprecatedHostImportModuleDynamicallyCallback)(
- v8::Local<v8::Context> context, v8::Local<v8::ScriptOrModule> referrer,
- v8::Local<v8::String> specifier);
-
- void SetHostImportModuleDynamicallyCallback(
- DeprecatedHostImportModuleDynamicallyCallback callback);
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback);
MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
@@ -2018,8 +2018,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
- DeprecatedHostImportModuleDynamicallyCallback
- host_import_module_dynamically_callback_ = nullptr;
HostImportModuleDynamicallyWithImportAssertionsCallback
host_import_module_dynamically_with_import_assertions_callback_ = nullptr;
std::atomic<debug::CoverageMode> code_coverage_mode_{
@@ -2043,14 +2041,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#ifdef V8_INTL_SUPPORT
std::string default_locale_;
- struct ICUObjectCacheTypeHash {
- std::size_t operator()(ICUObjectCacheType a) const {
- return static_cast<std::size_t>(a);
- }
+ // The cache stores the most recently accessed {locales,obj} pair for each
+ // cache type.
+ struct ICUObjectCacheEntry {
+ std::string locales;
+ std::shared_ptr<icu::UMemory> obj;
+
+ ICUObjectCacheEntry() = default;
+ ICUObjectCacheEntry(std::string locales, std::shared_ptr<icu::UMemory> obj)
+ : locales(locales), obj(std::move(obj)) {}
};
- typedef std::pair<std::string, std::shared_ptr<icu::UMemory>> ICUCachePair;
- std::unordered_map<ICUObjectCacheType, ICUCachePair, ICUObjectCacheTypeHash>
- icu_object_cache_;
+
+ ICUObjectCacheEntry icu_object_cache_[kICUObjectCacheTypeCount];
#endif // V8_INTL_SUPPORT
// true if being profiled. Causes collection of extra compile info.
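The ICU cache rewrite above swaps a hash map keyed by ICUObjectCacheType for a fixed-size array indexed by the enum value, keeping only the most recently used {locales, obj} pair per cache type. A standalone sketch of that enum-indexed single-entry cache pattern (illustrative names only, not V8 code):

#include <array>
#include <memory>
#include <string>

// Illustrative stand-ins; not V8 types.
enum class CacheType { kCollator, kNumberFormat, kDateFormat };
constexpr int kCacheTypeCount = 3;

struct Entry {
  std::string key;              // e.g. the locales string
  std::shared_ptr<void> value;  // stands in for std::shared_ptr<icu::UMemory>
};

class DemoCache {
 public:
  // Returns the cached object only if the key matches the stored entry.
  void* Get(CacheType type, const std::string& key) {
    Entry& e = entries_[static_cast<int>(type)];
    return e.key == key ? e.value.get() : nullptr;
  }
  // Overwrites whatever was cached for this type (one entry per type).
  void Put(CacheType type, std::string key, std::shared_ptr<void> value) {
    entries_[static_cast<int>(type)] = {std::move(key), std::move(value)};
  }

 private:
  std::array<Entry, kCacheTypeCount> entries_;
};

int main() {
  DemoCache cache;
  cache.Put(CacheType::kCollator, "en-US", std::make_shared<int>(42));
  return cache.Get(CacheType::kCollator, "en-US") != nullptr ? 0 : 1;
}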
diff --git a/chromium/v8/src/execution/local-isolate-inl.h b/chromium/v8/src/execution/local-isolate-inl.h
index ca7c119b6bf..80baf1ab0ee 100644
--- a/chromium/v8/src/execution/local-isolate-inl.h
+++ b/chromium/v8/src/execution/local-isolate-inl.h
@@ -13,6 +13,11 @@ namespace v8 {
namespace internal {
Address LocalIsolate::cage_base() const { return isolate_->cage_base(); }
+
+Address LocalIsolate::code_cage_base() const {
+ return isolate_->code_cage_base();
+}
+
ReadOnlyHeap* LocalIsolate::read_only_heap() const {
return isolate_->read_only_heap();
}
diff --git a/chromium/v8/src/execution/local-isolate.h b/chromium/v8/src/execution/local-isolate.h
index 55891f87c5a..82a715dfeb1 100644
--- a/chromium/v8/src/execution/local-isolate.h
+++ b/chromium/v8/src/execution/local-isolate.h
@@ -58,6 +58,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalHeap* heap() { return &heap_; }
inline Address cage_base() const;
+ inline Address code_cage_base() const;
inline ReadOnlyHeap* read_only_heap() const;
inline Object root(RootIndex index) const;
inline Handle<Object> root_handle(RootIndex index) const;
diff --git a/chromium/v8/src/execution/loong64/frame-constants-loong64.cc b/chromium/v8/src/execution/loong64/frame-constants-loong64.cc
new file mode 100644
index 00000000000..4bd809266c6
--- /dev/null
+++ b/chromium/v8/src/execution/loong64/frame-constants-loong64.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/execution/loong64/frame-constants-loong64.h"
+
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/chromium/v8/src/execution/loong64/frame-constants-loong64.h b/chromium/v8/src/execution/loong64/frame-constants-loong64.h
new file mode 100644
index 00000000000..1395f47a7bb
--- /dev/null
+++ b/chromium/v8/src/execution/loong64/frame-constants-loong64.h
@@ -0,0 +1,76 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "src/execution/frame-constants.h"
+
+namespace v8 {
+namespace internal {
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+};
+
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+ static constexpr int kNumberOfSavedAllParamRegs = 15;
+
+ // FP-relative.
+ // See Generate_WasmCompileLazy in builtins-loong64.cc.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(6);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7, s8}
+ static constexpr uint32_t kPushedGpRegs = 0b11010011100000111111111111110000;
+ // {f0, f1, f2, ... f27, f28}
+ static constexpr uint32_t kPushedFpRegs = 0x1fffffff;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
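WasmDebugBreakFrameConstants above locates a pushed register by counting how many pushed registers have a smaller code (a population count over the lower bits of the mask) and scaling by the slot size. A self-contained sketch of that lookup with made-up mask, slot size and base offset:

#include <cassert>
#include <cstdint>

// Portable popcount so the sketch does not depend on base::bits or C++20.
int PopCount(uint32_t x) {
  int n = 0;
  for (; x != 0; x &= x - 1) ++n;
  return n;
}

int main() {
  constexpr uint32_t kPushedRegs = 0b1011;  // made up: registers 0, 1 and 3
  constexpr int kSlotSize = 8;              // one system pointer per slot
  constexpr int kLastPushedOffset = -3 * kSlotSize;  // fp-relative lowest slot

  // Offset of a pushed register = lowest slot + (number of pushed registers
  // with a smaller code) * slot size, as in GetPushedGpRegisterOffset above.
  auto offset = [&](int reg_code) {
    uint32_t lower = kPushedRegs & ((uint32_t{1} << reg_code) - 1);
    return kLastPushedOffset + PopCount(lower) * kSlotSize;
  };

  assert(offset(0) == -24);  // no pushed register below it
  assert(offset(1) == -16);  // register 0 sits below it
  assert(offset(3) == -8);   // registers 0 and 1 sit below it
  return 0;
}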
diff --git a/chromium/v8/src/execution/loong64/simulator-loong64.cc b/chromium/v8/src/execution/loong64/simulator-loong64.cc
new file mode 100644
index 00000000000..33f10304f60
--- /dev/null
+++ b/chromium/v8/src/execution/loong64/simulator-loong64.cc
@@ -0,0 +1,5538 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/loong64/simulator-loong64.h"
+
+// Only build the simulator if not compiling for real LOONG64 hardware.
+#if defined(USE_SIMULATOR)
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <cmath>
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get)
+
+// #define PRINT_SIM_LOG
+
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) {
+ uint64_t u0, v0, w0;
+ uint64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
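MultiplyHighSigned and MultiplyHighUnsigned above compute the upper 64 bits of a 128-bit product by splitting each operand into 32-bit limbs and carrying between partial products. The same decomposition is easier to check at 32-bit width, where a plain 64-bit multiply gives the reference result; this sketch mirrors the unsigned variant:

#include <cassert>
#include <cstdint>

// Same limb decomposition as above, at 32-bit width so a 64-bit multiply can
// serve as the reference result.
uint32_t MultiplyHigh32(uint32_t u, uint32_t v) {
  uint32_t u0 = u & 0xFFFF, u1 = u >> 16;  // split into 16-bit limbs
  uint32_t v0 = v & 0xFFFF, v1 = v >> 16;

  uint32_t w0 = u0 * v0;              // low partial product
  uint32_t t = u1 * v0 + (w0 >> 16);  // carry the high half of w0 upward
  uint32_t w1 = (t & 0xFFFF) + u0 * v1;
  uint32_t w2 = t >> 16;

  return u1 * v1 + w2 + (w1 >> 16);   // high 32 bits of the 64-bit product
}

int main() {
  uint32_t a = 0xDEADBEEF, b = 0x12345678;
  uint64_t full = uint64_t{a} * b;
  assert(MultiplyHigh32(a, b) == static_cast<uint32_t>(full >> 32));
  return 0;
}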
+
+#ifdef PRINT_SIM_LOG
+inline void printf_instr(const char* _Format, ...) {
+ va_list varList;
+ va_start(varList, _Format);
+ vprintf(_Format, varList);
+ va_end(varList);
+}
+#else
+#define printf_instr(...)
+#endif
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as base::SNPrintF is that the Windows C
+// Run-Time Library does not provide vsscanf.
+#define SScanF sscanf
+
+// The Loong64Debugger class is used by the simulator while debugging simulated
+// code.
+class Loong64Debugger {
+ public:
+ explicit Loong64Debugger(Simulator* sim) : sim_(sim) {}
+
+ void Stop(Instruction* instr);
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = BREAK | 0xFFFF;
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+ bool GetValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); }
+
+void Loong64Debugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ PrintF("Simulator hit (%u)\n", code);
+ Debug();
+}
+
+int64_t Loong64Debugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+int64_t Loong64Debugger::GetFPURegisterValue(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+float Loong64Debugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+double Loong64Debugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+bool Loong64Debugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
+
+bool Loong64Debugger::SetBreakpoint(Instruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool Loong64Debugger::DeleteBreakpoint(Instruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void Loong64Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+void Loong64Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
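The debugger above keeps at most one patched breakpoint: SetBreakpoint remembers the original instruction word, RedoBreakpoints overwrites it with a break encoding before resuming, and UndoBreakpoints restores it while the shell is active. A minimal sketch of that scheme over an array of fake instruction words (the break encoding here is made up):

#include <cstdint>

constexpr uint32_t kBreakInstr = 0xFFFF0000;  // made-up breakpoint encoding

struct DemoDebugger {
  uint32_t* break_pc = nullptr;  // patched location, if any
  uint32_t saved_instr = 0;      // original word at that location

  bool SetBreakpoint(uint32_t* pc) {
    if (break_pc != nullptr) return false;  // only one breakpoint at a time
    break_pc = pc;
    saved_instr = *pc;  // remember what Redo() will overwrite
    return true;
  }
  void Undo() {  // entering the debugger shell: hide the patch
    if (break_pc != nullptr) *break_pc = saved_instr;
  }
  void Redo() {  // resuming execution: re-install the patch
    if (break_pc != nullptr) *break_pc = kBreakInstr;
  }
};

int main() {
  uint32_t code[3] = {1, 2, 3};
  DemoDebugger dbg;
  if (!dbg.SetBreakpoint(&code[1])) return 1;
  dbg.Redo();  // code[1] now holds the break encoding
  if (code[1] != kBreakInstr) return 1;
  dbg.Undo();  // the shell sees the original instruction again
  return code[1] == 2 ? 0 : 1;
}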
+
+void Loong64Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+ PrintF("\n");
+ // at, v0, a0.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
+ REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ // v1, a1.
+ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ "", REG_INFO(3), REG_INFO(5));
+ // a2.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(6));
+ // a3.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(7));
+ PrintF("\n");
+ // a4-t3, s0-s7
+ for (int i = 0; i < 8; i++) {
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(8 + i), REG_INFO(16 + i));
+ }
+ PrintF("\n");
+ // t8, k0, LO.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ // t9, k1, HI.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ // sp, fp, gp.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ // pc.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(31), REG_INFO(34));
+
+#undef REG_INFO
+}
+
+void Loong64Debugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) \
+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ // TODO(plind): consider printing 2 columns for space efficiency.
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31));
+
+#undef FPU_REG_INFO
+}
+
+void Loong64Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
+ value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj.Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
+ while (cur < end) {
+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ Object obj(*cur);
+ Heap* current_heap = sim_->isolate_->heap();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
+ }
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(nullptr)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("No flags on LOONG64 !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble.
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+ PrintF(" dump stack content, default dump 10 words\n");
+ PrintF("mem <address> [<words>]\n");
+ PrintF(" dump memory content, default dump 10 words\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+bool Simulator::ICacheMatch(void* one, void* two) {
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
+ return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, size_t size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
+ int64_t start = reinterpret_cast<int64_t>(start_addr);
+ int64_t intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
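FlushICache above first rounds the start address down to a cache-line boundary and the size up to whole lines, then walks the range one cache page at a time. A small sketch of just the alignment arithmetic, with made-up line and page sizes:

#include <cassert>
#include <cstdint>

int main() {
  constexpr int64_t kLineSize = 32, kLineMask = kLineSize - 1;    // made up
  constexpr int64_t kPageSize = 4096, kPageMask = kPageSize - 1;  // made up

  int64_t start = 0x1234;  // arbitrary unaligned start address
  int64_t size = 100;

  int64_t intra_line = start & kLineMask;  // distance into the cache line
  start -= intra_line;                     // round start down to a line
  size += intra_line;
  size = ((size - 1) | kLineMask) + 1;     // round size up to whole lines

  assert(start % kLineSize == 0);
  assert(size % kLineSize == 0);
  assert(start + size >= 0x1234 + 100);    // original range is still covered

  int64_t offset = start & kPageMask;        // position within the first page
  int64_t first_chunk = kPageSize - offset;  // bytes flushed from this page
  assert(first_chunk > 0 && first_chunk <= kPageSize);
  return 0;
}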
+
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+ if (entry->value == nullptr) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, size_t size) {
+ DCHECK_LE(size, CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
+ int64_t address = reinterpret_cast<int64_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+ stack_size_ = FLAG_sim_stack_size * KB;
+ stack_ = reinterpret_cast<char*>(base::Malloc(stack_size_));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ for (int i = 0; i < kNumCFRegisters; i++) {
+ CFregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ last_debugger_input_ = nullptr;
+}
+
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ base::Free(stack_);
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK_NOT_NULL(isolate_data);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == nullptr) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[1];
+ registers_[reg] = registers_[reg] << 32;
+ registers_[reg] += dbl[0];
+}
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* pword;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+
+ *pword = value;
+}
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* phiword;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+
+ *phiword = value;
+}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_cf_register(int cfreg, bool value) {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ CFregisters_[cfreg] = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg];
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, sizeof(registers_[0]));
+ return (dm_val);
+}
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
+}
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
+}
+
+bool Simulator::get_cf_register(int cfreg) const {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ return CFregisters_[cfreg];
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from f0, f1 and a2.
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+ const int fparg2 = f1;
+ *x = get_fpu_register_double(f0);
+ *y = get_fpu_register_double(fparg2);
+ *z = static_cast<int32_t>(get_register(a2));
+}
+
+// The return value is either in a0/a1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ set_fpu_register_double(0, result);
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ FCSR_ |= mode & kFPURoundingModeMask;
+}
+
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so we load the closest representable value into max_int64, which is 2^63.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so we load the closest representable value into max_int64, which is 2^63.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so we load the closest representable value into max_int64, which is 2^63.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so we load the closest representable value into max_int64, which is 2^63.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// For ftint instructions only
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ // switch ((FCSR_ >> 8) & 3) {
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
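The round_according_to_fcsr helpers above implement round-to-nearest by computing floor(x + 0.5) and then nudging exact halfway cases to the even neighbour. A standalone sketch of that tie handling:

#include <cassert>
#include <cmath>
#include <cstdint>

int64_t RoundToNearestEven(double x) {
  double rounded = std::floor(x + 0.5);  // round halfway cases up first...
  int64_t result = static_cast<int64_t>(rounded);
  if ((result & 1) != 0 && result - x == 0.5) {
    result -= 1;  // ...then push exact ties back to the even neighbour
  }
  return result;
}

int main() {
  assert(RoundToNearestEven(2.5) == 2);    // tie goes to the even neighbour
  assert(RoundToNearestEven(3.5) == 4);
  assert(RoundToNearestEven(2.4) == 2);    // non-ties round normally
  assert(RoundToNearestEven(-3.5) == -4);  // negative ties go to even too
  return 0;
}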
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+ if ((1)) { // Flag for this was removed.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ base::OS::Abort();
+ }
+}
+
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e",
+ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
+ break;
+ case WORD_DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
+ v.fmt_int64, v.fmt_int64);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ v.fmt_int64, addr, icount_, v.fmt_int32[0],
+ v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ value, addr, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0],
+ v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case BYTE:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case HALF:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64
+ ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )",
+ value, addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemRd(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int8:%" PRId8 " uint8:%" PRIu8,
+ static_cast<uint8_t>(value), addr, icount_,
+ static_cast<int8_t>(value), static_cast<uint8_t>(value));
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int16:%" PRId16 " uint16:%" PRIu16,
+ static_cast<uint16_t>(value), addr, icount_,
+ static_cast<int16_t>(value),
+ static_cast<uint16_t>(value));
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ static_cast<uint32_t>(value), addr, icount_,
+ static_cast<int32_t>(value),
+ static_cast<uint32_t>(value));
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ static_cast<uint64_t>(value), addr, icount_,
+ static_cast<int64_t>(value),
+ static_cast<uint64_t>(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemWr(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64
+ ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+ "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint64_t>(value), addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): sign-extend and zero-extend are not implemented properly
+// on all the ReadXX functions; I don't think reinterpret_cast does it.
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ // if ((addr & 0x3) == 0) {
+ local_monitor_.NotifyLoad();
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
+ return *ptr;
+ // }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditionalW(int64_t addr, int32_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr,
+ TransactionSize::DoubleWord) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+double Simulator::ReadD(int64_t addr, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // base::OS::Abort();
+ // return 0;
+}
+
+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned signed halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint32_t Simulator::ReadBU(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr & 0xFF;
+}
+
+int32_t Simulator::ReadB(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+}
+
+void Simulator::WriteB(int64_t addr, uint8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::WriteB(int64_t addr, int8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+template <typename T>
+T Simulator::ReadMem(int64_t addr, Instruction* instr) {
+ int alignment_mask = (1 << sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyLoad();
+ T* ptr = reinterpret_cast<T*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+ return 0;
+}
+
+template <typename T>
+void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
+ int alignment_mask = (1 << sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ T* ptr = reinterpret_cast<T*>(addr);
+ *ptr = value;
+ TraceMemWr(addr, value);
+ return;
+ }
+ PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (base::Stack::GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED();
+}
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the v1 result register contains a bogus
+// value, which is fine because it is caller-saved.
+
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8, int64_t arg9);
+
+// These prototypes handle the four types of FP calls.
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
+
+// This signature supports direct calls into an API function's native callback
+// (refer to InvocationCallback in v8.h).
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
+
+// This signature supports direct calls to an accessor getter callback.
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with the simulator.
+void Simulator::SoftwareInterrupt() {
+ int32_t opcode_hi15 = instr_.Bits(31, 17);
+ CHECK_EQ(opcode_hi15, 0x15);
+ uint32_t code = instr_.Bits(14, 0);
+ // We first check whether this is a call_rt_redirected instruction.
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
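+ // The first eight integer arguments are taken from a0-a7; arguments 8 and 9
+ // are read from the stack.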
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_fpu_register(f2);
+ arg3 = get_fpu_register(f3);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = get_register(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+
+ // Based on CpuFeatures::IsSupported(FPU), Loong64 will use either hardware
+ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+ // simulator. Soft-float has additional abstraction of ExternalReference,
+ // to support serialization.
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int64_t>(iresult));
+ // set_register(v1, static_cast<int64_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0);
+ }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p "
+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
+ reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ }
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ set_register(v0, (int64_t)(result.x));
+ set_register(v1, (int64_t)(result.y));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
+ get_register(v0));
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+
+ } else if (code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr_.instr());
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint64_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+ Loong64Debugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64
+ " ) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::HandleStop(uint64_t code, Instruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ Loong64Debugger dbg(this);
+ dbg.Stop(instr);
+ }
+}
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t opcode_hi15 = instr->Bits(31, 17);
+ uint32_t code = static_cast<uint32_t>(instr->Bits(14, 0));
+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::IsEnabledStop(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint64_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::DisableStop(uint64_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
+ PrintF("Stop counter for code %" PRId64
+ " has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+ code, state, count);
+ }
+ }
+}
+
+void Simulator::SignalException(Exception e) {
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
+}
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ *result = a;
+ } else if (std::isnan(a)) {
+ *result = b;
+ } else if (std::isnan(b)) {
+ *result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+ // negates the result.
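+ // Net effect: when a and b are zeros of opposite sign, a min operation
+ // selects -0.0 and a max operation selects +0.0.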
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
+
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+ int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+ DCHECK(std::isnan(arg));
+ T qNaN = std::numeric_limits<T>::quiet_NaN();
+ if (keepSign == KeepSign::yes) {
+ return std::copysign(qNaN, result);
+ }
+ return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+ T result = f(first, args...);
+ if (std::isnan(result)) {
+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+ }
+ return result;
+}
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeOp6() {
+ int64_t alu_out;
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ set_register(ra, current_pc + kInstrSize);
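+ // Reassemble the signed 26-bit offset: instruction bits 25:10 hold
+ // offs26[15:0] and bits 9:0 hold offs26[25:16] (sign-extended from bit 25).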
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
+
+ auto BranchOff16Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("Offs16: %08x\n", offs16);
+ int32_t offs = do_branch ? (offs16 << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff21Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
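+ // Reassemble the signed 21-bit offset: instruction bits 25:10 hold
+ // offs21[15:0] and bits 4:0 hold offs21[20:16] (sign-extended from bit 20).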
+ int32_t offs21_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs21_high5 = static_cast<int32_t>(instr_.Bits(4, 0) << 27) >> 11;
+ int32_t offs = offs21_low16 | offs21_high5;
+ printf_instr("Offs21: %08x\n", offs);
+ offs = do_branch ? (offs << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff26Helper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
+
+ auto JumpOff16Helper = [this, &next_pc]() {
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), offs16);
+ set_register(rd_reg(), get_pc() + kInstrSize);
+ next_pc = rj() + (offs16 << 2);
+ set_pc(next_pc);
+ };
+
+ switch (instr_.Bits(31, 26) << 26) {
+ case ADDU16I_D: {
+ printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si16());
+ int32_t si16_upper = static_cast<int32_t>(si16()) << 16;
+ alu_out = static_cast<int64_t>(si16_upper) + rj();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BEQZ:
+ printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() == 0);
+ break;
+ case BNEZ:
+ printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() != 0);
+ break;
+ case BCZ: {
+ if (instr_.Bits(9, 8) == 0b00) {
+ // BCEQZ
+ printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == false);
+ } else if (instr_.Bits(9, 8) == 0b01) {
+ // BCNEZ
+ printf_instr("BCNEZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == true);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case JIRL:
+ JumpOff16Helper();
+ break;
+ case B:
+ printf_instr("B\t ");
+ BranchOff26Helper();
+ break;
+ case BL:
+ printf_instr("BL\t ");
+ BranchAndLinkHelper();
+ break;
+ case BEQ:
+ printf_instr("BEQ\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() == rd());
+ break;
+ case BNE:
+ printf_instr("BNE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() != rd());
+ break;
+ case BLT:
+ printf_instr("BLT\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() < rd());
+ break;
+ case BGE:
+ printf_instr("BGE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() >= rd());
+ break;
+ case BLTU:
+ printf_instr("BLTU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj_u() < rd_u());
+ break;
+ case BGEU:
+ printf_instr("BGEU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj_u() >= rd_u());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp7() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 25) << 25) {
+ case LU12I_W: {
+ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_upper = static_cast<int32_t>(si20() << 12);
+ SetResult(rd_reg(), static_cast<int64_t>(si20_upper));
+ break;
+ }
+ case LU32I_D: {
+ printf_instr("LU32I_D\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 12;
+ int64_t lower_32bit_mask = 0xFFFFFFFF;
+ alu_out = (static_cast<int64_t>(si20_signExtend) << 32) |
+ (rd() & lower_32bit_mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDI: {
+ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()),
+ rd(), si20());
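+ // (si20() << 12) >> 10 sign-extends the 20-bit immediate and scales it by 4.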
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 10;
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCALAU12I: {
+ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000;
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out & clear_lower12bit_mask);
+ break;
+ }
+ case PCADDU12I: {
+ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDU18I: {
+ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
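+ // (si20() << 44) >> 26 sign-extends the 20-bit immediate and shifts it left
+ // by 18 before it is added to the PC.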
+ int64_t si20_signExtend = (static_cast<int64_t>(si20()) << 44) >> 26;
+ int64_t current_pc = get_pc();
+ alu_out = si20_signExtend + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp8() {
+ int64_t addr = 0x0;
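+ // (si14() << 50) >> 48 sign-extends the 14-bit immediate and scales it by 4.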
+ int64_t si14_se = (static_cast<int64_t>(si14()) << 50) >> 48;
+
+ switch (instr_.Bits(31, 24) << 24) {
+ case LDPTR_W:
+ printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_W:
+ printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ WriteW(rj() + si14_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case LDPTR_D:
+ printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_D:
+ printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ Write2W(rj() + si14_se, rd(), instr_.instr());
+ break;
+ case LL_W: {
+ printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_W: {
+ printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditionalW(addr, static_cast<int32_t>(rd()), instr_.instr(),
+ rd_reg());
+ break;
+ }
+ case LL_D: {
+ printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), Read2W(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_D: {
+ printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditional2W(addr, rd(), instr_.instr(), rd_reg());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp10() {
+ int64_t alu_out = 0x0;
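+ // si12 is sign-extended for arithmetic and memory offsets; ui12 is
+ // zero-extended for the bitwise immediates (ANDI/ORI/XORI).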
+ int64_t si12_se = (static_cast<int64_t>(si12()) << 52) >> 52;
+ uint64_t si12_ze = (static_cast<uint64_t>(ui12()) << 52) >> 52;
+
+ switch (instr_.Bits(31, 22) << 22) {
+ case BSTR_W: {
+ CHECK_EQ(instr_.Bit(21), 1);
+ uint8_t lsbw_ = lsbw();
+ uint8_t msbw_ = msbw();
+ CHECK_LE(lsbw_, msbw_);
+ uint8_t size = msbw_ - lsbw_ + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ if (instr_.Bit(15) == 0) {
+ // BSTRINS_W
+ printf_instr(
+ "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rd_u() & ~(mask << lsbw_)) |
+ ((rj_u() & mask) << lsbw_));
+ } else {
+ // BSTRPICK_W
+ printf_instr(
+ "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rj_u() & (mask << lsbw_)) >> lsbw_);
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BSTRINS_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+ printf_instr(
+ "BSTRINS_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_);
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case BSTRPICK_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+ printf_instr(
+ "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_;
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case SLTI:
+ printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() < si12_se ? 1 : 0);
+ break;
+ case SLTUI:
+ printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj_u() < static_cast<uint64_t>(si12_se) ? 1 : 0);
+ break;
+ case ADDI_W: {
+ printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ int32_t alu32_out =
+ static_cast<int32_t>(rj()) + static_cast<int32_t>(si12_se);
+ SetResult(rd_reg(), alu32_out);
+ break;
+ }
+ case ADDI_D:
+ printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() + si12_se);
+ break;
+ case LU52I_D: {
+ printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ int64_t si12_se = static_cast<int64_t>(si12()) << 52;
+ uint64_t mask = (1ULL << 52) - 1;
+ alu_out = si12_se + (rj() & mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ANDI:
+ printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj() & si12_ze);
+ break;
+ case ORI:
+ printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() | si12_ze);
+ break;
+ case XORI:
+ printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() ^ si12_ze);
+ break;
+ case LD_B:
+ printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadB(rj() + si12_se));
+ break;
+ case LD_H:
+ printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_W:
+ printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_D:
+ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr()));
+ break;
+ case ST_B:
+ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteB(rj() + si12_se, static_cast<int8_t>(rd()));
+ break;
+ case ST_H:
+ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteH(rj() + si12_se, static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case ST_W:
+ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteW(rj() + si12_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case ST_D:
+ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ Write2W(rj() + si12_se, rd(), instr_.instr());
+ break;
+ case LD_BU:
+ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadBU(rj() + si12_se));
+ break;
+ case LD_HU:
+ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_WU:
+ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr()));
+ break;
+ case FLD_S: {
+ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(
+ fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE));
+ break;
+ }
+ case FST_S: {
+ printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(fd_reg()));
+ WriteW(rj() + si12_se, alu_out_32, instr_.instr());
+ break;
+ }
+ case FLD_D: {
+ printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr()));
+ TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case FST_D: {
+ printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr());
+ TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp12() {
+ switch (instr_.Bits(31, 20) << 20) {
+ case FMADD_S:
+ printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float()));
+ break;
+ case FMADD_D:
+ printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), fa_double()));
+ break;
+ case FMSUB_S:
+ printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(fj_float(), fk_float(), -fa_float()));
+ break;
+ case FMSUB_D:
+ printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMADD_S:
+ printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), -fa_float()));
+ break;
+ case FNMADD_D:
+ printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMSUB_S:
+ printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), fa_float()));
+ break;
+ case FNMSUB_D:
+ printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), fa_double()));
+ break;
+ case FCMP_COND_S: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ float fj = fj_float();
+ float fk = fk_float();
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_S fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FCMP_COND_D: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ double fj = fj_double();
+ double fk = fk_double();
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_D fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FSEL: {
+ CHECK_EQ(instr_.Bits(19, 18), 0);
+ printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(),
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ if (ca() == 0) {
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ } else {
+ SetFPUDoubleResult(fd_reg(), fk_double());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp14() {
+ int64_t alu_out = 0x0;
+ int32_t alu32_out = 0x0;
+
+ switch (instr_.Bits(31, 18) << 18) {
+ case ALSL: {
+ uint8_t sa = sa2() + 1;
+ alu32_out =
+ (static_cast<int32_t>(rj()) << sa) + static_cast<int32_t>(rk());
+ if (instr_.Bit(17) == 0) {
+ // ALSL_W
+ printf_instr("ALSL_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), alu32_out);
+ } else {
+ // ALSL_WU
+ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), static_cast<uint32_t>(alu32_out));
+ }
+ break;
+ }
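+ // BYTEPICK_W builds a 32-bit value whose low sa2*8 bits are the top bytes
+ // of rj and whose upper bits are the low bytes of rk (sa2 == 0 simply
+ // copies rk), then sign-extends the result to 64 bits. For example, with
+ // sa2 == 1 the result is {rk[23:0], rj[31:24]}.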
+ case BYTEPICK_W: {
+ CHECK_EQ(instr_.Bit(17), 0);
+ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ uint8_t sa = sa2() * 8;
+ if (sa == 0) {
+ alu32_out = static_cast<int32_t>(rk());
+ } else {
+ int32_t mask = (1 << 31) >> (sa - 1);
+ int32_t rk_hi = (static_cast<int32_t>(rk()) & (~mask)) << sa;
+ int32_t rj_lo = (static_cast<uint32_t>(rj()) & mask) >> (32 - sa);
+ alu32_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case BYTEPICK_D: {
+ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa3());
+ uint8_t sa = sa3() * 8;
+ if (sa == 0) {
+ alu_out = rk();
+ } else {
+ int64_t mask = (1LL << 63) >> (sa - 1);
+ int64_t rk_hi = (rk() & (~mask)) << sa;
+ int64_t rj_lo = static_cast<uint64_t>(rj() & mask) >> (64 - sa);
+ alu_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ALSL_D: {
+ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ CHECK_EQ(instr_.Bit(17), 0);
+ uint8_t sa = sa2() + 1;
+ alu_out = (rj() << sa) + rk();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
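+ // For the immediate shifts and rotates below, bits 17..15 select between
+ // the 32-bit form (ui5 shift amount, result sign-extended to 64 bits) and
+ // the 64-bit form (ui6 shift amount).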
+ case SLLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SLLI_W
+ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) << ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if ((instr_.Bits(17, 16) == 0b01)) {
+ // SLLI_D
+ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() << ui6());
+ }
+ break;
+ }
+ case SRLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRLI_W
+ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<uint32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRLI_D
+ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj_u() >> ui6());
+ }
+ break;
+ }
+ case SRAI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRAI_W
+ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRAI_D
+ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() >> ui6());
+ }
+ break;
+ }
+ case ROTRI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // ROTRI_W
+ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rj_u()),
+ static_cast<const uint32_t>(ui5())));
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // ROTRI_D
+ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ alu_out =
+ static_cast<int64_t>(base::bits::RotateRight64(rj_u(), ui6()));
+ SetResult(rd_reg(), alu_out);
+ printf_instr("ROTRI, %s, %s, %d\n", Registers::Name(rd_reg()),
+ Registers::Name(rj_reg()), ui6());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
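+// Op17-format instructions are dispatched on bits 31..15. This group covers
+// the three-register integer ALU and shift operations, multiply/divide,
+// indexed loads and stores (effective address rj + rk), the basic FP
+// arithmetic, and the AM* atomic read-modify-write instructions.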
+void Simulator::DecodeTypeOp17() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 15) << 15) {
+ case ADD_W: {
+ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() + rk());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case ADD_D:
+ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() + rk());
+ break;
+ case SUB_W: {
+ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() - rk());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case SUB_D:
+ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() - rk());
+ break;
+ case SLT:
+ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() < rk() ? 1 : 0);
+ break;
+ case SLTU:
+ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0);
+ break;
+ case MASKEQZ:
+ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() == 0 ? rj() : 0);
+ break;
+ case MASKNEZ:
+ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() != 0 ? rj() : 0);
+ break;
+ case NOR:
+ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), ~(rj() | rk()));
+ break;
+ case AND:
+ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & rk());
+ break;
+ case OR:
+ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | rk());
+ break;
+ case XOR:
+ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() ^ rk());
+ break;
+ case ORN:
+ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | (~rk()));
+ break;
+ case ANDN:
+ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & (~rk()));
+ break;
+ case SLL_W:
+ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), (int32_t)rj() << (rk_u() % 32));
+ break;
+ case SRL_W: {
+ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>((uint32_t)rj_u() >> (rk_u() % 32));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_W:
+ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), (int32_t)rj() >> (rk_u() % 32));
+ break;
+ case SLL_D:
+ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() << (rk_u() % 64));
+ break;
+ case SRL_D: {
+ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int64_t>(rj_u() >> (rk_u() % 64));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_D:
+ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() >> (rk_u() % 64));
+ break;
+ case ROTR_W: {
+ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rj_u()),
+ static_cast<const uint32_t>(rk_u() % 32)));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ROTR_D: {
+ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int64_t>(
+ base::bits::RotateRight64((rj_u()), (rk_u() % 64)));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
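+ // MUL_W keeps the 32-bit product of rj and rk, sign-extended to 64 bits,
+ // while the MULH_* cases widen the 32-bit operands to 64 bits and keep the
+ // upper half of the product; MULW_D_W / MULW_D_WU store the full 64-bit
+ // product of two 32-bit operands.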
+ case MUL_W: {
+ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>(rj()) * static_cast<int32_t>(rk());
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case MULH_W: {
+ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_lo = static_cast<int32_t>(rj());
+ int32_t rk_lo = static_cast<int32_t>(rk());
+ alu_out = static_cast<int64_t>(rj_lo) * static_cast<int64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MULH_WU: {
+ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_lo = static_cast<uint32_t>(rj_u());
+ uint32_t rk_lo = static_cast<uint32_t>(rk_u());
+ alu_out = static_cast<uint64_t>(rj_lo) * static_cast<uint64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MUL_D:
+ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() * rk());
+ break;
+ case MULH_D:
+ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk()));
+ break;
+ case MULH_DU:
+ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u()));
+ break;
+ case MULW_D_W: {
+ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int64_t rj_i32 = static_cast<int32_t>(rj());
+ int64_t rk_i32 = static_cast<int32_t>(rk());
+ SetResult(rd_reg(), rj_i32 * rk_i32);
+ break;
+ }
+ case MULW_D_WU: {
+ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint64_t rj_u32 = static_cast<uint32_t>(rj_u());
+ uint64_t rk_u32 = static_cast<uint32_t>(rk_u());
+ SetResult(rd_reg(), rj_u32 * rk_u32);
+ break;
+ }
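+ // For the divisions below, a zero divisor leaves rd unchanged, and the
+ // signed overflow case (INT_MIN / -1 resp. LONG_MIN / -1) produces the
+ // minimum value for the quotient and 0 for the remainder.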
+ case DIV_W: {
+ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), INT_MIN);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 / rk_i32);
+ }
+ break;
+ }
+ case MOD_W: {
+ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 % rk_i32);
+ }
+ break;
+ }
+ case DIV_WU: {
+ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 / rk_u32));
+ }
+ break;
+ }
+ case MOD_WU: {
+ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 % rk_u32));
+ }
+ break;
+ }
+ case DIV_D: {
+ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), LONG_MIN);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() / rk());
+ }
+ break;
+ }
+ case MOD_D: {
+ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() % rk());
+ }
+ break;
+ }
+ case DIV_DU: {
+ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() / rk_u()));
+ }
+ break;
+ }
+ case MOD_DU: {
+ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() % rk_u()));
+ }
+ break;
+ }
+ case BREAK:
+ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0));
+ SoftwareInterrupt();
+ break;
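+ // The FP arithmetic cases route the actual operation through
+ // FPUCanonalizeOperation, which (as used here) applies the lambda to the
+ // operands and is presumably responsible for canonicalizing NaN results.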
+ case FADD_S: {
+ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FADD_D: {
+ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs + rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FSUB_S: {
+ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FSUB_D: {
+ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs - rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMUL_S: {
+ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FMUL_D: {
+ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs * rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FDIV_S: {
+ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FDIV_D: {
+ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs / rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMAX_S:
+ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float()));
+ break;
+ case FMAX_D:
+ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double()));
+ break;
+ case FMIN_S:
+ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float()));
+ break;
+ case FMIN_D:
+ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double()));
+ break;
+ case FMAXA_S:
+ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float()));
+ break;
+ case FMAXA_D:
+ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double()));
+ break;
+ case FMINA_S:
+ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float()));
+ break;
+ case FMINA_D:
+ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double()));
+ break;
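+ // Indexed loads and stores below compute the effective address as rj + rk
+ // and delegate the access to the Read*/Write* helpers; the instruction word
+ // is passed along as well.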
+ case LDX_B:
+ printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadB(rj() + rk()));
+ break;
+ case LDX_H:
+ printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_W:
+ printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_D:
+ printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr()));
+ break;
+ case STX_B:
+ printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteB(rj() + rk(), static_cast<int8_t>(rd()));
+ break;
+ case STX_H:
+ printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteH(rj() + rk(), static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case STX_W:
+ printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case STX_D:
+ printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ Write2W(rj() + rk(), rd(), instr_.instr());
+ break;
+ case LDX_BU:
+ printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadBU(rj() + rk()));
+ break;
+ case LDX_HU:
+ printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_WU:
+ printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr()));
+ break;
+ case FLDX_S:
+ printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(fd_reg(),
+ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE));
+ break;
+ case FLDX_D:
+ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr()));
+ break;
+ case FSTX_S:
+ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(get_fpu_register(fd_reg())),
+ instr_.instr());
+ break;
+ case FSTX_D:
+ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr());
+ break;
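+ // The plain AM* atomics are not implemented in the simulator and abort via
+ // UNIMPLEMENTED(); only the *_DB_* (full-barrier) swap/add/and/or/xor forms
+ // further below are emulated.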
+ case AMSWAP_W:
+ printf("Sim UNIMPLEMENTED: AMSWAP_W\n");
+ UNIMPLEMENTED();
+ case AMSWAP_D:
+ printf("Sim UNIMPLEMENTED: AMSWAP_D\n");
+ UNIMPLEMENTED();
+ case AMADD_W:
+ printf("Sim UNIMPLEMENTED: AMADD_W\n");
+ UNIMPLEMENTED();
+ case AMADD_D:
+ printf("Sim UNIMPLEMENTED: AMADD_D\n");
+ UNIMPLEMENTED();
+ case AMAND_W:
+ printf("Sim UNIMPLEMENTED: AMAND_W\n");
+ UNIMPLEMENTED();
+ case AMAND_D:
+ printf("Sim UNIMPLEMENTED: AMAND_D\n");
+ UNIMPLEMENTED();
+ case AMOR_W:
+ printf("Sim UNIMPLEMENTED: AMOR_W\n");
+ UNIMPLEMENTED();
+ case AMOR_D:
+ printf("Sim UNIMPLEMENTED: AMOR_D\n");
+ UNIMPLEMENTED();
+ case AMXOR_W:
+ printf("Sim UNIMPLEMENTED: AMXOR_W\n");
+ UNIMPLEMENTED();
+ case AMXOR_D:
+ printf("Sim UNIMPLEMENTED: AMXOR_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DU\n");
+ UNIMPLEMENTED();
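+ // Each implemented *_DB_* atomic is emulated with a load-linked /
+ // store-conditional loop: the old value at [rj] is loaded under the global
+ // monitor, the new value is computed from rk (and, for the arithmetic and
+ // logical forms, the loaded value), and the conditional store is retried
+ // until it succeeds; rd finally receives the value that was originally in
+ // memory.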
+ case AMSWAP_DB_W: {
+ printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(), static_cast<int32_t>(rk()), instr_.instr(),
+ rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMSWAP_DB_D: {
+ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_W: {
+ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) +
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_D: {
+ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_W: {
+ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) &
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_D: {
+ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_W: {
+ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) |
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_D: {
+ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_W: {
+ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) ^
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_D: {
+ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMMAX_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n");
+ UNIMPLEMENTED();
+ case DBAR:
+ printf_instr("DBAR\n");
+ break;
+ case IBAR:
+ printf("Sim UNIMPLEMENTED: IBAR\n");
+ UNIMPLEMENTED();
+ case FSCALEB_S:
+ printf("Sim UNIMPLEMENTED: FSCALEB_S\n");
+ UNIMPLEMENTED();
+ case FSCALEB_D:
+ printf("Sim UNIMPLEMENTED: FSCALEB_D\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_S:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_D:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n");
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
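+// Op22-format instructions are dispatched on bits 31..10 and are mostly
+// two-operand forms: bit counting (CLZ/CTZ), byte and bit reversal, the
+// single/double FP unary operations, FP<->integer conversions, and the
+// FCSR/condition-flag moves.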
+void Simulator::DecodeTypeOp22() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 10) << 10) {
+ case CLZ_W: {
+ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_W: {
+ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CLZ_D: {
+ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_D: {
+ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
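+ // The REVB_*/REVH_* cases reverse byte (or halfword) order within 16-, 32-
+ // or 64-bit lanes by walking a byte mask down from the top of the input and
+ // shifting each selected field to its mirrored position.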
+ case REVB_2H: {
+ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_4H: {
+ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_2W: {
+ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF000000FF000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (24 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 24);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_D: {
+ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 3) {
+ tmp = tmp >> (56 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 56);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_2W: {
+ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 16;
+ } else {
+ tmp = tmp << 16;
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_D: {
+ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (48 - i * 32);
+ } else {
+ tmp = tmp << (i * 32 - 48);
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
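+ // BITREV_4B / BITREV_8B reverse the bit order inside every byte while
+ // keeping the byte order; each byte is reversed with the multiply-and-mask
+ // trick credited to Sean Anderson below, e.g. 0b00000001 -> 0b10000000.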
+ case BITREV_4B: {
+ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte << 24));
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_8B: {
+ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte.
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint64_t>(o_byte) << 56);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_W: {
+ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ output = base::bits::ReverseBits(input);
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_D: {
+ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = static_cast<int64_t>(base::bits::ReverseBits(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_B: {
+ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint8_t input = static_cast<uint8_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int8_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_H: {
+ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint16_t input = static_cast<uint16_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int16_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case FABS_S:
+ printf_instr("FABS_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::abs(fj_float()));
+ break;
+ case FABS_D:
+ printf_instr("FABS_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), std::abs(fj_double()));
+ break;
+ case FNEG_S:
+ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), -fj_float());
+ break;
+ case FNEG_D:
+ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), -fj_double());
+ break;
+ case FSQRT_S: {
+ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ if (fj_float() >= 0) {
+ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FSQRT_D: {
+ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ if (fj_double() >= 0) {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FMOV_S:
+ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), fj_float());
+ break;
+ case FMOV_D:
+ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ break;
+ case MOVGR2FR_W: {
+ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE);
+ break;
+ }
+ case MOVGR2FR_D:
+ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ SetFPUResult2(fd_reg(), rj());
+ break;
+ case MOVGR2FRH_W: {
+ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_hi_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case MOVFR2GR_S: {
+ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ set_register(rd_reg(),
+ static_cast<int64_t>(get_fpu_register_word(fj_reg())));
+ TraceRegWr(get_register(rd_reg()), WORD_DWORD);
+ break;
+ }
+ case MOVFR2GR_D:
+ printf_instr("MOVFR2GR_D\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register(fj_reg()));
+ break;
+ case MOVFRH2GR_S:
+ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg()));
+ break;
+ case MOVGR2FCSR: {
+ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_,
+ Registers::Name(rj_reg()), rj());
+ // fcsr could be 0-3
+ CHECK_LT(rd_reg(), 4);
+ FCSR_ = static_cast<uint32_t>(rj());
+ TraceRegWr(FCSR_);
+ break;
+ }
+ case MOVFCSR2GR: {
+ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n",
+ Registers::Name(rd_reg()), rd(), FCSR_);
+ // fcsr could be 0-3
+ CHECK_LT(rj_reg(), 4);
+ SetResult(rd_reg(), FCSR_);
+ break;
+ }
+ case FCVT_S_D:
+ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUFloatResult(fd_reg(), static_cast<float>(fj_double()));
+ break;
+ case FCVT_D_S:
+ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(fj_float()));
+ break;
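+ // FTINT conversions: the RM/RP/RZ/RNE suffixes round toward minus infinity,
+ // plus infinity, zero, and to nearest (ties to even) respectively, while the
+ // plain FTINT_* cases further below use the rounding mode from the FCSR.
+ // Out-of-range or NaN inputs are flagged via set_fcsr_round*_error and the
+ // destination is then overwritten by the matching
+ // set_fpu_register_*_invalid_result helper.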
+ case FTINTRM_W_S: {
+ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_W_D: {
+ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_S: {
+ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_D: {
+ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_S: {
+ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_D: {
+ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_S: {
+ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_D: {
+ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_S: {
+ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_D: {
+ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_S: {
+ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_D: {
+ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_S: {
+ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_D: {
+ printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_S: {
+ printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_D: {
+ printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_S: {
+ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_D: {
+ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_S: {
+ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_D: {
+ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FFINT_S_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_S_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_D_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
+ case FFINT_D_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
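+ // FRINT rounds to a nearby integral value in the same FP format, honoring
+ // the FCSR rounding mode. The nearest case is resolved by comparing the
+ // distances to ceil/floor and, on a tie, checking (via modf of upper / 2)
+ // whether the upper candidate is even; the inexact flag is set whenever the
+ // result differs from the input.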
+ case FRINT_S: {
+ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fj);
+ float lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
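+ // Tie case, e.g. fj = 2.5f: upper (3) and lower (2) are equally close.
+ // upper / 2 = 1.5 has a nonzero fractional part, so the even candidate
+ // (lower) is chosen; for fj = 3.5f, upper / 2 = 2 is integral and upper wins.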
+ temp_result = upper / 2;
+ float remainder = std::modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUFloatResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case FRINT_D: {
+ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double result, temp, temp_result;
+ double upper = std::ceil(fj);
+ double lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+ double remainder = std::modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUDoubleResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case MOVFR2CF:
+ printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
+ UNIMPLEMENTED();
+ case MOVCF2FR:
+ printf("Sim UNIMPLEMENTED: MOVCF2FR\n");
+ UNIMPLEMENTED();
+ case MOVGR2CF:
+ printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(),
+ Registers::Name(rj_reg()), rj());
+ set_cf_register(cd_reg(), rj() & 1);
+ break;
+ case MOVCF2GR:
+ printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()),
+ rd(), cj_reg());
+ SetResult(rd_reg(), cj());
+ break;
+ case FRECIP_S:
+ printf("Sim UNIMPLEMENTED: FRECIP_S\n");
+ UNIMPLEMENTED();
+ case FRECIP_D:
+ printf("Sim UNIMPLEMENTED: FRECIP_D\n");
+ UNIMPLEMENTED();
+ case FRSQRT_S:
+ printf("Sim UNIMPLEMENTED: FRSQRT_S\n");
+ UNIMPLEMENTED();
+ case FRSQRT_D:
+ printf("Sim UNIMPLEMENTED: FRSQRT_D\n");
+ UNIMPLEMENTED();
+ case FCLASS_S:
+ printf("Sim UNIMPLEMENTED: FCLASS_S\n");
+ UNIMPLEMENTED();
+ case FCLASS_D:
+ printf("Sim UNIMPLEMENTED: FCLASS_D\n");
+ UNIMPLEMENTED();
+ case FLOGB_S:
+ printf("Sim UNIMPLEMENTED: FLOGB_S\n");
+ UNIMPLEMENTED();
+ case FLOGB_D:
+ printf("Sim UNIMPLEMENTED: FLOGB_D\n");
+ UNIMPLEMENTED();
+ case CLO_W:
+ printf("Sim UNIMPLEMENTED: CLO_W\n");
+ UNIMPLEMENTED();
+ case CTO_W:
+ printf("Sim UNIMPLEMENTED: CTO_W\n");
+ UNIMPLEMENTED();
+ case CLO_D:
+ printf("Sim UNIMPLEMENTED: CLO_D\n");
+ UNIMPLEMENTED();
+ case CTO_D:
+ printf("Sim UNIMPLEMENTED: CTO_D\n");
+ UNIMPLEMENTED();
+ // Unimplemented opcodes were already rejected above (via UNIMPLEMENTED()),
+ // so reaching the default case here is an error.
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(i_cache(), instr);
+ }
+ pc_modified_ = false;
+
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ if (::v8::internal::FLAG_trace_sim) {
+ base::SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ }
+
+ static int instr_count = 0;
+ USE(instr_count);
+ instr_ = instr;
+ printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++,
+ instr_.Bits(31, 0), get_pc());
+ switch (instr_.InstructionType()) {
+ case Instruction::kOp6Type:
+ DecodeTypeOp6();
+ break;
+ case Instruction::kOp7Type:
+ DecodeTypeOp7();
+ break;
+ case Instruction::kOp8Type:
+ DecodeTypeOp8();
+ break;
+ case Instruction::kOp10Type:
+ DecodeTypeOp10();
+ break;
+ case Instruction::kOp12Type:
+ DecodeTypeOp12();
+ break;
+ case Instruction::kOp14Type:
+ DecodeTypeOp14();
+ break;
+ case Instruction::kOp17Type:
+ DecodeTypeOp17();
+ break;
+ case Instruction::kOp22Type:
+ DecodeTypeOp22();
+ break;
+ default: {
+ printf("instr_: %x\n", instr_.Bits(31, 0));
+ UNREACHABLE();
+ }
+ }
+
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
+ trace_buf_.begin());
+ }
+
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
+ }
+}
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at has a non-default value. Stop in the debugger when
+ // we reach the requested instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+void Simulator::CallInternal(Address entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
+ // Prepare to execute the code at entry.
+ set_register(pc, static_cast<int64_t>(entry));
+ // Put down a marker for the end of simulation. The simulator will stop
+ // simulating when the PC reaches this value. By saving the "end simulation"
+ // value into ra, the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t tp_val = get_register(tp);
+ int64_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value, so that we can
+ // check that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(tp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation.
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(tp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(tp, tp_val);
+ set_register(fp, fp_val);
+}
+
+intptr_t Simulator::CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
+ // Set up arguments.
+
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments);
+ int64_t entry_stack = original_stack - stack_args_size;
+
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
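+ // e.g. with a 16-byte activation frame alignment this clears the low four
+ // bits of entry_stack, rounding the simulated stack pointer down.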
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ return get_register(a0);
+}
+
+double Simulator::CallFP(Address entry, double d0, double d1) {
+ const FPURegister fparg2 = f1;
+ set_fpu_register_double(f0, d0);
+ set_fpu_register_double(fparg2, d1);
+ CallInternal(entry);
+ return get_fpu_register_double(f0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+ int64_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-linked load could clear the local monitor. As a result, the most
+ // conservative behavior is to unconditionally clear the local monitor on
+ // load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the local monitor. As a result, the
+ // most conservative behavior is to unconditionally clear the local monitor
+ // on store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the global monitor. As a result, the
+ // most conservative behavior is to unconditionally clear global monitors
+ // on store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_thread) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_thread) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another thread so the implementation is more conservative (i.e. the
+ // granularity of locking is as large as possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
+
+#undef SScanF
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
diff --git a/chromium/v8/src/execution/loong64/simulator-loong64.h b/chromium/v8/src/execution/loong64/simulator-loong64.h
new file mode 100644
index 00000000000..b9e97b93b22
--- /dev/null
+++ b/chromium/v8/src/execution/loong64/simulator-loong64.h
@@ -0,0 +1,647 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declares a Simulator for loongisa instructions if we are not generating a
+// native loongisa binary. This Simulator allows us to run and debug loongisa
+// code generation on regular desktop machines. V8 calls into generated code
+// via the GeneratedCode wrapper, which will either start execution in the
+// Simulator or forward to the real entry on a loongisa HW platform.
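+//
+// Illustrative call site (a sketch; this mirrors how V8's tests and runtime
+// invoke generated code through that wrapper):
+//   auto fn = GeneratedCode<int64_t(int64_t, int64_t)>::FromAddress(isolate,
+//                                                                   entry);
+//   int64_t result = fn.Call(1, 2);  // Runs through Simulator::Call here.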
+
+#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+
+// globals.h defines USE_SIMULATOR.
+#include "src/common/globals.h"
+
+template <typename T>
+int Compare(const T& a, const T& b) {
+ if (a == b)
+ return 0;
+ else if (a < b)
+ return -1;
+ else
+ return 1;
+}
+
+// Returns the negative absolute value of its argument.
+template <typename T,
+ typename = typename std::enable_if<std::is_signed<T>::value>::type>
+T Nabs(T a) {
+ return a < 0 ? a : -a;
+}
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/base/hashmap.h"
+#include "src/base/strings.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
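+ // With these values a cache page covers 4 KiB split into 1024 four-byte
+ // lines, so validity_map_ below keeps one validity byte per line.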
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
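+ // The instruction getters read the instruction word through 'this', so
+ // operand_ is expected to be the object's first data member; the DCHECK
+ // above guards that layout assumption.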
+ return *this;
+ }
+};
+
+class Simulator : public SimulatorBase {
+ public:
+ friend class Loong64Debugger;
+
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ gp,
+ sp,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ t8,
+ tp,
+ fp,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ v0 = a0,
+ v1 = a1
+ };
+
+ // Condition flag registers.
+ enum CFRegister {
+ fcc0,
+ fcc1,
+ fcc2,
+ fcc3,
+ fcc4,
+ fcc5,
+ fcc6,
+ fcc7,
+ kNumCFRegisters
+ };
+
+ // Floating point registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state. Reading the pc value adheres to the LOONG64
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+ void set_cf_register(int cfreg, bool value);
+ bool get_cf_register(int cfreg) const;
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int);
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
+
+ Address get_sp() const { return static_cast<Address>(get_register(sp)); }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit(uintptr_t c_limit) const;
+
+ // Executes LOONG64 instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ template <typename Return, typename... Args>
+ Return Call(Address entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
+
+ // Alternative: call a 2-argument double function.
+ double CallFP(Address entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
+ // ICache checking.
+ static bool ICacheMatch(void* one, void* two);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments);
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD,
+ FLOAT,
+ DOUBLE,
+ FLOAT_DOUBLE,
+ WORD_DWORD
+ };
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int64_t addr);
+ inline int32_t ReadB(int64_t addr);
+ inline void WriteB(int64_t addr, uint8_t value);
+ inline void WriteB(int64_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int64_t addr, Instruction* instr);
+ inline int16_t ReadH(int64_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(int64_t addr, Instruction* instr);
+ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
+ inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
+ int32_t rt_reg);
+ inline int64_t Read2W(int64_t addr, Instruction* instr);
+ inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+ inline void WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rt_reg);
+
+ inline double ReadD(int64_t addr, Instruction* instr);
+ inline void WriteD(int64_t addr, double value, Instruction* instr);
+
+ template <typename T>
+ T ReadMem(int64_t addr, Instruction* instr);
+ template <typename T>
+ void WriteMem(int64_t addr, T value, Instruction* instr);
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
+ void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
+ template <typename T>
+ void TraceMemRd(int64_t addr, T value);
+ template <typename T>
+ void TraceMemWr(int64_t addr, T value);
+
+ SimInstruction instr_;
+
+ // Execution is dispatched based on the instruction type.
+ void DecodeTypeOp6();
+ void DecodeTypeOp7();
+ void DecodeTypeOp8();
+ void DecodeTypeOp10();
+ void DecodeTypeOp12();
+ void DecodeTypeOp14();
+ void DecodeTypeOp17();
+ void DecodeTypeOp22();
+
+ inline int32_t rj_reg() const { return instr_.RjValue(); }
+ inline int64_t rj() const { return get_register(rj_reg()); }
+ inline uint64_t rj_u() const {
+ return static_cast<uint64_t>(get_register(rj_reg()));
+ }
+ inline int32_t rk_reg() const { return instr_.RkValue(); }
+ inline int64_t rk() const { return get_register(rk_reg()); }
+ inline uint64_t rk_u() const {
+ return static_cast<uint64_t>(get_register(rk_reg()));
+ }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int64_t rd() const { return get_register(rd_reg()); }
+ inline uint64_t rd_u() const {
+ return static_cast<uint64_t>(get_register(rd_reg()));
+ }
+ inline int32_t fa_reg() const { return instr_.FaValue(); }
+ inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
+ inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
+ inline int32_t fj_reg() const { return instr_.FjValue(); }
+ inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
+ inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
+ inline int32_t fk_reg() const { return instr_.FkValue(); }
+ inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
+ inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
+ inline int32_t fd_reg() const { return instr_.FdValue(); }
+ inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
+ inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
+ inline int32_t cj_reg() const { return instr_.CjValue(); }
+ inline bool cj() const { return get_cf_register(cj_reg()); }
+ inline int32_t cd_reg() const { return instr_.CdValue(); }
+ inline bool cd() const { return get_cf_register(cd_reg()); }
+ inline int32_t ca_reg() const { return instr_.CaValue(); }
+ inline bool ca() const { return get_cf_register(ca_reg()); }
+ inline uint32_t sa2() const { return instr_.Sa2Value(); }
+ inline uint32_t sa3() const { return instr_.Sa3Value(); }
+ inline uint32_t ui5() const { return instr_.Ui5Value(); }
+ inline uint32_t ui6() const { return instr_.Ui6Value(); }
+ inline uint32_t lsbw() const { return instr_.LsbwValue(); }
+ inline uint32_t msbw() const { return instr_.MsbwValue(); }
+ inline uint32_t lsbd() const { return instr_.LsbdValue(); }
+ inline uint32_t msbd() const { return instr_.MsbdValue(); }
+ inline uint32_t cond() const { return instr_.CondValue(); }
+ inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
+ inline uint32_t ui12() const { return instr_.Ui12Value(); }
+ inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
+ inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
+ inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
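+ // The shift pairs above sign-extend the 12/14/16/20-bit immediate fields:
+ // shift the field up to bit 31, then arithmetically shift back down.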
+
+ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
+
+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), WORD);
+ }
+
+ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+ set_fpu_register_float(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), FLOAT);
+ }
+
+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+ set_fpu_register_double(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ // Used for breakpoints.
+ void SoftwareInterrupt();
+
+ // Stop helper functions.
+ bool IsWatchpoint(uint64_t code);
+ void PrintWatchpoint(uint64_t code);
+ void HandleStop(uint64_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint64_t code);
+ void EnableStop(uint64_t code);
+ void DisableStop(uint64_t code);
+ void IncreaseStopCounter(uint64_t code);
+ void PrintStopInfo(uint64_t code);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+
+ // ICache.
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ size_t size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+
+ // Exceptions.
+ void SignalException(Exception e);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
+ void SetFpResult(const double& result);
+
+ void CallInternal(Address entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Floating point Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Condition flags Registers.
+ bool CFregisters_[kNumCFRegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ size_t stack_size_;
+ char* stack_;
+ bool pc_modified_;
+ int64_t icount_;
+ int break_count_;
+ base::EmbeddedVector<char, 128> trace_buf_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when it reaches the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
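+ // For example, count == kStopDisabledBit | 5 describes a stop that is
+ // currently disabled and has been reached five times.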
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ DoubleWord = 8,
+ };
+
+ // The least-significant bits of the address are ignored. The number of
+ // ignored bits is implementation-defined, between 3 and the number of bits
+ // covered by the minimum page size.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
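+ // With 3 ignored bits, addresses 0x1000 and 0x1004 fall into the same
+ // reservation granule while 0x1008 does not.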
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+ // An scd can fail due to background cache evictions. Rather than
+ // simulating this, we'll just occasionally introduce cases where a
+ // store conditional fails. This will happen once after every
+ // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
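+
+ // Sketch of how these two members cooperate in the store-conditional
+ // handlers (illustrative; see WriteConditionalW/WriteConditional2W above):
+ //   base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ //   if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ //       GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ //           addr, &global_monitor_thread_)) {
+ //     /* perform the store and report success */
+ //   }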
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
diff --git a/chromium/v8/src/execution/messages.cc b/chromium/v8/src/execution/messages.cc
index ad530e1f2a7..10d89ca14e8 100644
--- a/chromium/v8/src/execution/messages.cc
+++ b/chromium/v8/src/execution/messages.cc
@@ -106,55 +106,55 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
Handle<JSMessageObject> message) {
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- if (api_message_obj->ErrorLevel() == v8::Isolate::kMessageError) {
- // We are calling into embedder's code which can throw exceptions.
- // Thus we need to save current exception state, reset it to the clean one
- // and ignore scheduled exceptions callbacks can throw.
+ if (api_message_obj->ErrorLevel() != v8::Isolate::kMessageError) {
+ ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ return;
+ }
- // We pass the exception object into the message handler callback though.
- Object exception_object = ReadOnlyRoots(isolate).undefined_value();
- if (isolate->has_pending_exception()) {
- exception_object = isolate->pending_exception();
- }
- Handle<Object> exception(exception_object, isolate);
+ // We are calling into embedder's code which can throw exceptions.
+ // Thus we need to save current exception state, reset it to the clean one
+ // and ignore scheduled exceptions callbacks can throw.
- Isolate::ExceptionScope exception_scope(isolate);
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
+ // We pass the exception object into the message handler callback though.
+ Object exception_object = ReadOnlyRoots(isolate).undefined_value();
+ if (isolate->has_pending_exception()) {
+ exception_object = isolate->pending_exception();
+ }
+ Handle<Object> exception(exception_object, isolate);
- // Turn the exception on the message into a string if it is an object.
- if (message->argument().IsJSObject()) {
- HandleScope scope(isolate);
- Handle<Object> argument(message->argument(), isolate);
+ Isolate::ExceptionScope exception_scope(isolate);
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
- MaybeHandle<Object> maybe_stringified;
- Handle<Object> stringified;
- // Make sure we don't leak uncaught internally generated Error objects.
- if (argument->IsJSError()) {
- maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
- } else {
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
+ // Turn the exception on the message into a string if it is an object.
+ if (message->argument().IsJSObject()) {
+ HandleScope scope(isolate);
+ Handle<Object> argument(message->argument(), isolate);
- maybe_stringified = Object::ToString(isolate, argument);
- }
+ MaybeHandle<Object> maybe_stringified;
+ Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (argument->IsJSError()) {
+ maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
- if (!maybe_stringified.ToHandle(&stringified)) {
- DCHECK(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
- stringified =
- isolate->factory()->NewStringFromAsciiChecked("exception");
- }
- message->set_argument(*stringified);
+ maybe_stringified = Object::ToString(isolate, argument);
}
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
- ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
- } else {
- ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ if (!maybe_stringified.ToHandle(&stringified)) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
+ stringified = isolate->factory()->exception_string();
+ }
+ message->set_argument(*stringified);
}
+
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+ ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
}
void MessageHandler::ReportMessageNoExceptions(
@@ -297,10 +297,14 @@ class V8_NODISCARD PrepareStackTraceScope {
MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ return isolate->factory()->empty_string();
+ }
DCHECK(raw_stack->IsFixedArray());
Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
+ const bool has_overflowed = i::StackLimitCheck{isolate}.HasOverflowed();
Handle<Context> error_context;
if (!in_recursion && error->GetCreationContext().ToHandle(&error_context)) {
DCHECK(error_context->IsNativeContext());
@@ -318,7 +322,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
isolate->RunPrepareStackTraceCallback(error_context, error, sites),
Object);
return result;
- } else {
+ } else if (!has_overflowed) {
Handle<JSFunction> global_error =
handle(error_context->error_function(), isolate);
@@ -359,7 +363,6 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
}
// Otherwise, run our internal formatting logic.
-
IncrementalStringBuilder builder(isolate);
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
@@ -818,11 +821,11 @@ MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
Handle<JSObject> ErrorUtils::NewIteratorError(Isolate* isolate,
Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
- if (hint == CallPrinter::kNone) {
+ if (hint == CallPrinter::ErrorHint::kNone) {
Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
return isolate->factory()->NewTypeError(id, callsite, iterator_symbol);
}
@@ -868,7 +871,7 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kCalledNonCallable;
id = UpdateErrorTemplate(hint, id);
@@ -878,7 +881,7 @@ Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
Handle<JSObject> ErrorUtils::NewConstructedNonConstructable(
Isolate* isolate, Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kNotConstructor;
return isolate->factory()->NewTypeError(id, callsite);
@@ -971,7 +974,6 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
callsite, object);
}
} else {
- Handle<Object> key_handle;
if (!key.ToHandle(&key_handle) ||
!maybe_property_name.ToHandle(&property_name)) {
error = isolate->factory()->NewTypeError(
diff --git a/chromium/v8/src/execution/messages.h b/chromium/v8/src/execution/messages.h
index a945b822994..5a542796477 100644
--- a/chromium/v8/src/execution/messages.h
+++ b/chromium/v8/src/execution/messages.h
@@ -12,6 +12,7 @@
#include <memory>
+#include "include/v8-local-handle.h"
#include "src/base/optional.h"
#include "src/common/message-template.h"
#include "src/handles/handles.h"
diff --git a/chromium/v8/src/execution/microtask-queue.h b/chromium/v8/src/execution/microtask-queue.h
index e9d40a924ff..6091fa3575c 100644
--- a/chromium/v8/src/execution/microtask-queue.h
+++ b/chromium/v8/src/execution/microtask-queue.h
@@ -6,11 +6,12 @@
#define V8_EXECUTION_MICROTASK_QUEUE_H_
#include <stdint.h>
+
#include <memory>
#include <vector>
#include "include/v8-internal.h" // For Address.
-#include "include/v8.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
namespace v8 {
diff --git a/chromium/v8/src/execution/mips/simulator-mips.cc b/chromium/v8/src/execution/mips/simulator-mips.cc
index c49172a564c..64ef946b2db 100644
--- a/chromium/v8/src/execution/mips/simulator-mips.cc
+++ b/chromium/v8/src/execution/mips/simulator-mips.cc
@@ -150,7 +150,6 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
} else {
return SScanF(desc, "%i", value) == 1;
}
- return false;
}
bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
@@ -169,7 +168,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2028,7 +2026,6 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
@@ -2055,7 +2052,6 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
@@ -2068,7 +2064,6 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
@@ -2330,7 +2325,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2359,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2372,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2930,7 +2922,6 @@ void Simulator::DecodeTypeRegisterDRsType() {
UNSUPPORTED();
}
break;
- break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
@@ -4233,7 +4224,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
}
}
@@ -4271,7 +4261,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4316,7 +4305,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4682,7 +4670,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -6798,7 +6785,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -6856,7 +6842,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -6880,14 +6865,16 @@ void Simulator::DecodeTypeImmediate() {
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xF0000000;
// Next pc.
- int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int32_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -6898,7 +6885,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/chromium/v8/src/execution/mips64/simulator-mips64.cc b/chromium/v8/src/execution/mips64/simulator-mips64.cc
index d45889e5a2e..f6286539009 100644
--- a/chromium/v8/src/execution/mips64/simulator-mips64.cc
+++ b/chromium/v8/src/execution/mips64/simulator-mips64.cc
@@ -159,7 +159,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2039,7 +2038,6 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
@@ -2330,7 +2328,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2362,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2375,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -4404,7 +4399,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4503,7 +4497,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4542,7 +4535,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4587,7 +4579,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4967,7 +4958,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -7187,7 +7177,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -7273,7 +7262,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -7297,13 +7285,15 @@ void Simulator::DecodeTypeImmediate() {
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
// Next pc.
- int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int64_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -7314,7 +7304,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/chromium/v8/src/execution/mips64/simulator-mips64.h b/chromium/v8/src/execution/mips64/simulator-mips64.h
index ce3f06f2ed9..69e80941741 100644
--- a/chromium/v8/src/execution/mips64/simulator-mips64.h
+++ b/chromium/v8/src/execution/mips64/simulator-mips64.h
@@ -243,7 +243,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int64_t value);
@@ -291,7 +291,7 @@ class Simulator : public SimulatorBase {
unsigned int get_msacsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
diff --git a/chromium/v8/src/execution/ppc/simulator-ppc.cc b/chromium/v8/src/execution/ppc/simulator-ppc.cc
index 5e9751c07ae..d9dc7813ee0 100644
--- a/chromium/v8/src/execution/ppc/simulator-ppc.cc
+++ b/chromium/v8/src/execution/ppc/simulator-ppc.cc
@@ -93,16 +93,12 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" V8PRIxPTR,
- reinterpret_cast<uintptr_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
- 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" V8PRIxPTR,
+ reinterpret_cast<uintptr_t*>(value)) == 1;
+ }
+ return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) == 1;
}
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -1031,7 +1027,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -1071,7 +1066,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1085,7 +1079,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -1200,7 +1193,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_register(r3, result_buffer);
}
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on
+ // non-simulator builds for arm/arm64), thus we expect that the slow
+ // path will be called. And since the slow path passes the arguments
+ // as a `const FunctionCallbackInfo<Value>&` (which is a GP argument),
+ // the call is made correctly.
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
@@ -1704,7 +1708,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case CRORC:
case CROR: {
UNIMPLEMENTED(); // Not used by V8.
- break;
}
case RLWIMIX: {
int ra = instr->RAValue();
@@ -2552,7 +2555,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint32_t rs_val = static_cast<uint32_t>(get_register(rs));
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 32 : __builtin_ctz(rs_val);
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -2570,7 +2573,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint64_t rs_val = get_register(rs);
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 64 : __builtin_ctzl(rs_val);
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -3192,7 +3195,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case LMW:
case STMW: {
UNIMPLEMENTED();
- break;
}
case LFSU:
@@ -3282,7 +3284,25 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
-
+ case BRW: {
+ constexpr int kBitsPerWord = 32;
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ uint32_t rs_high = rs_val >> kBitsPerWord;
+ uint32_t rs_low = (rs_val << kBitsPerWord) >> kBitsPerWord;
+ uint64_t result = __builtin_bswap32(rs_high);
+ result = (result << kBitsPerWord) | __builtin_bswap32(rs_low);
+ set_register(ra, result);
+ break;
+ }
+ case BRD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ set_register(ra, __builtin_bswap64(rs_val));
+ break;
+ }
case FCFIDS: {
// fcfids
int frt = instr->RTValue();
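A small self-contained check of the byte-reverse semantics added above, with illustrative values: brd reverses all eight bytes of the doubleword, while brw reverses the bytes within each 32-bit half independently.

    #include <cassert>
    #include <cstdint>

    // Mirror of the BRW handling above: byte-reverse each 32-bit half.
    uint64_t ByteReverseWordHalves(uint64_t v) {
      uint32_t hi = static_cast<uint32_t>(v >> 32);
      uint32_t lo = static_cast<uint32_t>(v);
      return (static_cast<uint64_t>(__builtin_bswap32(hi)) << 32) |
             __builtin_bswap32(lo);
    }

    int main() {
      assert(__builtin_bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL);
      assert(ByteReverseWordHalves(0x0102030405060708ULL) ==
             0x0403020108070605ULL);
    }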
@@ -3512,7 +3532,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3557,7 +3576,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3609,7 +3627,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3634,8 +3651,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
? kRoundToZero
: (fp_condition_reg_ & kFPRoundingModeMask);
uint64_t frt_val;
- uint64_t kMinVal = 0;
- uint64_t kMaxVal = kMinVal - 1;
+ uint64_t kMinVal = kMinUInt32;
+ uint64_t kMaxVal = kMaxUInt32;
bool invalid_convert = false;
if (std::isnan(frb_val)) {
@@ -3653,7 +3670,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3683,7 +3699,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int fra = instr->RAValue();
double frb_val = get_double_from_d_register(frb);
double fra_val = get_double_from_d_register(fra);
- double frt_val = std::copysign(fra_val, frb_val);
+ double frt_val = std::copysign(frb_val, fra_val);
set_d_register_from_double(frt, frt_val);
return;
}
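The argument swap above matters because std::copysign(magnitude, sign) takes the magnitude first: with the corrected order the result carries FRB's magnitude and FRA's sign. A short check with illustrative values:

    #include <cassert>
    #include <cmath>

    int main() {
      double fra_val = -1.0;  // sign source
      double frb_val = 2.5;   // magnitude source
      assert(std::copysign(frb_val, fra_val) == -2.5);  // corrected order
      assert(std::copysign(fra_val, frb_val) == 1.0);   // what the old order produced
    }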
@@ -3746,7 +3762,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
return;
}
@@ -4728,6 +4743,36 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+ case XSCVSPDPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ uint64_t double_bits = get_d_register(b);
+ // Value is at the high 32 bits of the register.
+ float f =
+ bit_cast<float, uint32_t>(static_cast<uint32_t>(double_bits >> 32));
+ double_bits = bit_cast<uint64_t, double>(static_cast<double>(f));
+ // Preserve snan.
+ if (issignaling(f)) {
+ double_bits &= 0xFFF7FFFFFFFFFFFFU; // Clear bit 51.
+ }
+ set_d_register(t, double_bits);
+ break;
+ }
+ case XSCVDPSPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ double b_val = get_double_from_d_register(b);
+ uint64_t float_bits = static_cast<uint64_t>(
+ bit_cast<uint32_t, float>(static_cast<float>(b_val)));
+ // Preserve snan.
+ if (issignaling(b_val)) {
+ float_bits &= 0xFFBFFFFFU; // Clear bit 22.
+ }
+      // fp result is placed in both 32-bit halves of the dst.
+ float_bits = (float_bits << 32) | float_bits;
+ set_d_register(t, float_bits);
+ break;
+ }
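Both new cases rely on the fact that a float/double conversion quiets a signalling NaN by setting the top mantissa bit (bit 51 of a double, bit 22 of a float), so clearing that bit afterwards restores the signalling encoding. A hedged sketch of the single-to-double direction, reusing the issignaling predicate the code above depends on (a glibc extension):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Widen a float to double while keeping a signalling NaN signalling.
    double WidenPreservingSnan(float f) {
      double d = static_cast<double>(f);
      if (issignaling(f)) {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof(bits));
        bits &= 0xFFF7FFFFFFFFFFFFULL;  // clear bit 51, the quiet bit
        std::memcpy(&d, &bits, sizeof(bits));
      }
      return d;
    }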
#define VECTOR_UNPACK(S, D, if_high_side) \
int t = instr->RTValue(); \
int b = instr->RBValue(); \
@@ -5118,7 +5163,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#undef GET_ADDRESS
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/chromium/v8/src/execution/riscv64/simulator-riscv64.cc b/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
index 3ec0c0e8117..4d289c4d20d 100644
--- a/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -60,6 +60,688 @@
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
+// The following code about RVV is based on:
+// https://github.com/riscv/riscv-isa-sim
+// Copyright (c) 2010-2017, The Regents of the University of California
+// (Regents). All Rights Reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the Regents nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+
+// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#define RVV_VI_GENERAL_LOOP_BASE \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
+#define RVV_VI_LOOP_END \
+ set_rvv_vstart(0); \
+ }
+
+#define RVV_VI_MASK_VARS \
+ const uint8_t midx = i / 64; \
+ const uint8_t mpos = i % 64;
+
+#define RVV_VI_LOOP_MASK_SKIP(BODY) \
+ RVV_VI_MASK_VARS \
+ if (instr_.RvvVM() == 0) { \
+ bool skip = ((Rvvelt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
+ if (skip) { \
+ continue; \
+ } \
+ }
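// A minimal stand-alone sketch (illustrative names, not part of the macros;
// fixed-width types from <cstdint>) of the mask addressing used by
// RVV_VI_LOOP_MASK_SKIP above: element i reads bit (i % 64) of 64-bit word
// (i / 64) of the v0 mask register.
inline bool MaskedOut(const uint64_t* v0, uint64_t i) {
  const uint8_t midx = i / 64;  // which 64-bit mask word
  const uint8_t mpos = i % 64;  // which bit inside that word
  return ((v0[midx] >> mpos) & 0x1) == 0;  // clear bit => element is skipped
}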
+
+#define RVV_VI_VV_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VV_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VVXI_MERGE_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ if (rvv_vsew() == E8) { \
+ VXI_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VXI_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VXI_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VXI_PARAMS(64); \
+ BODY; \
+ } else if (rvv_vsew() == E128) { \
+ VXI_PARAMS(128); \
+ BODY \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define VV_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+#define XI_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+// carry/borrow bit loop
+#define RVV_VI_VV_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ VV_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define RVV_VI_XI_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ XI_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ XI_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ XI_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ XI_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define VV_CMP_PARAMS(x) \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_CMP_PARAMS(x) \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_CMP_PARAMS(x) \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UCMP_PARAMS(x) \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UCMP_PARAMS(x) \
+ type_usew_t<x>::type rs1 = \
+ (type_sew_t<x>::type)(get_register(rvv_vs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UCMP_PARAMS(x) \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)instr_.RvvUimm5(); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_CMP_BASE \
+ CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ uint64_t mmask = uint64_t(1) << mpos; \
+ uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
+ uint64_t res = 0;
+
+#define RVV_VI_LOOP_CMP_END \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ } \
+ rvv_trace_vd(); \
+ set_rvv_vstart(0);
+
+// comparison result to masking register
+#define RVV_VI_VV_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VX_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VV_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VX_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VFP_LOOP_BASE \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP();
+
+#define RVV_VI_VFP_LOOP_END \
+ } \
+ set_rvv_vstart(0);
+
+#define RVV_VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ } \
+ case E32: { \
+ float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
+ float fs1 = static_cast<float>(get_fpu_register(rs1_reg())); \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double fs1 = static_cast<double>(get_fpu_register(rs1_reg())); \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ break; \
+ } \
+ case E32: { \
+ float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
+ float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_LOOP_CMP_BASE \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ uint64_t mmask = uint64_t(1) << mpos; \
+ uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
+ uint64_t res = 0;
+
+#define RVV_VI_VFP_LOOP_CMP_END \
+ switch (rvv_vsew()) { \
+ case E16: \
+ case E32: \
+ case E64: { \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_LOOP_CMP(BODY16, BODY32, BODY64, is_vs1) \
+ RVV_VI_VFP_LOOP_CMP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ } \
+ case E32: { \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_CMP_END
+
+// reduction loop - signed
+#define RVV_VI_LOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_REDUCTION_END(x) \
+ } \
+ if (rvv_vl() > 0) { \
+ vd_0_des = vd_0_res; \
+ } \
+ set_rvv_vstart(0);
+
+#define REDUCTION_LOOP(x, BODY) \
+ RVV_VI_LOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_LOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_LOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_LOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_LOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_LOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
+
+// reduction loop - unsigned
+#define RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define REDUCTION_ULOOP(x, BODY) \
+ RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_ULOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_ULOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_ULOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_ULOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_ULOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
+
+#define VI_STRIP(inx) reg_t vreg_inx = inx;
+
+#define VI_ELEMENT_SKIP(inx) \
+ if (inx >= vl) { \
+ continue; \
+ } else if (inx < rvv_vstart()) { \
+ continue; \
+ } else { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ }
+
+#define require_vm \
+ do { \
+ if (instr_.RvvVM() == 0) CHECK_NE(rvv_vd_reg(), 0); \
+ } while (0);
+
+#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
+ reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8;
+// float vemul = is_mask_ldst ? 1 : ((float)veew / rvv_vsew() * Rvvvflmul);
+// reg_t emul = vemul < 1 ? 1 : vemul;
+// require(vemul >= 0.125 && vemul <= 8);
+// require_align(rvv_rd(), vemul);
+// require((nf * emul) <= (NVPR / 4) && (rvv_rd() + nf * emul) <= NVPR);
+
+#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
+ VI_CHECK_STORE(elt_width, is_mask_ldst); \
+ require_vm;
+
+/*vd + fn * emul*/
+#define RVV_VI_LD(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ auto val = ReadMem<elt_width##_t>( \
+ baseAddr + (stride) + (offset) * sizeof(elt_width##_t), \
+ instr_.instr()); \
+ type_sew_t<sizeof(elt_width##_t)* 8>::type& vd = \
+ Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>(rvv_vd_reg(), \
+ vreg_inx, true); \
+ vd = val; \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " <-- 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
+
+#define RVV_VI_ST(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ elt_width##_t vs1 = Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>( \
+ rvv_vs3_reg(), vreg_inx); \
+ WriteMem(baseAddr + (stride) + (offset) * sizeof(elt_width##_t), vs1, \
+ instr_.instr()); \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " --> 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
+
+#define VI_VFP_LOOP_SCALE_BASE \
+ /*require(STATE.frm < 0x5);*/ \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP();
+
+#define RVV_VI_VFP_CVT_SCALE(BODY8, BODY16, BODY32, CHECK8, CHECK16, CHECK32, \
+ is_widen, eew_check) \
+ CHECK(eew_check); \
+ switch (rvv_vsew()) { \
+ case E8: { \
+ CHECK8 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY8 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ case E16: { \
+ CHECK16 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY16 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ case E32: { \
+ CHECK32 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY32 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ default: \
+ require(0); \
+ break; \
+ } \
+ rvv_trace_vd();
+
namespace v8 {
namespace internal {
@@ -116,13 +798,14 @@ class RiscvDebugger {
int64_t GetFPURegisterValue(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
+ __int128_t GetVRegisterValue(int regnum);
bool GetValue(const char* desc, int64_t* value);
};
-inline void UNSUPPORTED() {
- printf("Sim: Unsupported instruction.\n");
+#define UNSUPPORTED() \
+ printf("Sim: Unsupported instruction. Func:%s Line:%d\n", __FUNCTION__, \
+ __LINE__); \
base::OS::Abort();
-}
int64_t RiscvDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
@@ -156,6 +839,14 @@ double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
+__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
+ if (regnum == kNumVRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_vregister(regnum);
+ }
+}
+
bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -172,7 +863,6 @@ bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
#define REG_INFO(name) \
@@ -315,6 +1005,7 @@ void RiscvDebugger::Debug() {
} else {
int regnum = Registers::Number(arg1);
int fpuregnum = FPURegisters::Number(arg1);
+ int vregnum = VRegisters::Number(arg1);
if (regnum != kInvalidRegister) {
value = GetRegisterValue(regnum);
@@ -325,6 +1016,11 @@ void RiscvDebugger::Debug() {
dvalue = GetFPURegisterValueDouble(fpuregnum);
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
FPURegisters::Name(fpuregnum), value, dvalue);
+ } else if (vregnum != kInvalidVRegister) {
+ __int128_t v = GetVRegisterValue(vregnum);
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ VRegisters::Name(vregnum), (uint64_t)(v >> 64),
+ (uint64_t)v);
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -960,6 +1656,11 @@ double Simulator::get_fpu_register_double(int fpureg) const {
return *bit_cast<double*>(&FPUregisters_[fpureg]);
}
+__int128_t Simulator::get_vregister(int vreg) const {
+ DCHECK((vreg >= 0) && (vreg < kNumVRegisters));
+ return Vregister_[vreg];
+}
+
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
// from fa0, fa1, and a0.
@@ -1301,6 +2002,9 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
#endif
T* ptr = reinterpret_cast<T*>(addr);
TraceMemWr(addr, value);
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" PRId64 "\n",
+ // (int64_t)ptr,
+ // (int64_t)value);
*ptr = value;
}
@@ -1424,7 +2128,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -1459,7 +2162,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -1473,7 +2175,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2042,7 +2743,17 @@ bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
result = (input1 == input2);
}
break;
-
+ case NE:
+ if (std::numeric_limits<T>::signaling_NaN() == input1 ||
+ std::numeric_limits<T>::signaling_NaN() == input2) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = true;
+ } else {
+ result = (input1 != input2);
+ }
+ break;
default:
UNREACHABLE();
}
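The new NE case mirrors the EQ case above it: a signalling NaN raises the invalid-operation flag, and an unordered comparison (either operand NaN) counts as "not equal". A small check of the unordered result, which matches IEEE-754 != semantics:

    #include <cassert>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert((nan != 1.0) == true);   // unordered => "not equal" is true
      assert((nan == nan) == false);  // unordered => "equal" is false
    }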
@@ -2376,7 +3087,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_S
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
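The sext32 wrapper added above reflects the RV64 convention that 32-bit results are kept sign-extended in 64-bit registers, even for the unsigned FCVT.WU conversions. A hedged sketch using a hypothetical stand-in for the simulator's helper:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the simulator's sext32 helper.
    inline int64_t Sext32(uint32_t x) { return static_cast<int32_t>(x); }

    int main() {
      assert(Sext32(0xFFFFFFFFu) == -1);          // all-ones word reads back as -1
      assert(Sext32(0x7FFFFFFFu) == 0x7FFFFFFF);  // positive results unchanged
    }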
@@ -2416,7 +3128,6 @@ void Simulator::DecodeRVRFPType() {
}
break;
}
- // TODO(RISCV): Implement handling of NaN (quiet and signalling).
case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
switch (instr_.Funct3Value()) {
case 0b010: { // RO_FEQ_S
@@ -2624,7 +3335,6 @@ void Simulator::DecodeRVRFPType() {
case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
if (instr_.Rs2Value() != 0b00000) {
UNSUPPORTED();
- break;
}
switch (instr_.Funct3Value()) {
case 0b001: { // RO_FCLASS_D
@@ -2651,7 +3361,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_D
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
@@ -2826,6 +3537,117 @@ void Simulator::DecodeRVR4Type() {
}
}
+bool Simulator::DecodeRvvVL() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_LD(0, (i * nf + fn), int8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::DecodeRvvVS() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_ST(0, (i * nf + fn), uint8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED_RISCV();
+ }
+ return true;
+ } else if (RO_V_VSS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSU == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
Builtin Simulator::LookUp(Address pc) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
@@ -3061,8 +3883,12 @@ void Simulator::DecodeRVIType() {
TraceMemRd(addr, val, get_fpu_register(frd_reg()));
break;
}
- default:
- UNSUPPORTED();
+ default: {
+ if (!DecodeRvvVL()) {
+ UNSUPPORTED();
+ }
+ break;
+ }
}
}
@@ -3095,7 +3921,10 @@ void Simulator::DecodeRVSType() {
break;
}
default:
- UNSUPPORTED();
+ if (!DecodeRvvVS()) {
+ UNSUPPORTED();
+ }
+ break;
}
}
@@ -3403,6 +4232,1187 @@ void Simulator::DecodeCBType() {
}
}
+/**
+ * RISCV-ISA-SIM
+ *
+ * @link https://github.com/riscv/riscv-isa-sim/
+ * @copyright Copyright (c) The Regents of the University of California
+ * @license https://github.com/riscv/riscv-isa-sim/blob/master/LICENSE
+ */
+// ref: https://locklessinc.com/articles/sat_arithmetic/
+template <typename T, typename UT>
+static inline T sat_add(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux + uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) | ~(uy ^ res)) >= 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
+
+template <typename T, typename UT>
+static inline T sat_sub(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux - uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) & (ux ^ res)) < 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
+
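// A short usage sketch of the saturating helpers above (illustrative values;
// assumes the sat_add/sat_sub templates defined in this file are in scope and
// int8_t from <cstdint>; the function name is hypothetical):
inline int SatArithmeticExample() {
  bool sat = false;
  int8_t a = sat_add<int8_t, uint8_t>(100, 100, sat);    // a == 127,  sat == true
  int8_t b = sat_add<int8_t, uint8_t>(-100, -100, sat);  // b == -128, sat == true
  int8_t c = sat_sub<int8_t, uint8_t>(-100, 100, sat);   // c == -128, sat == true
  return (a == 127 && b == -128 && c == -128) ? 0 : 1;
}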
+void Simulator::DecodeRvvIVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 + vs2; });
+ break;
+ }
+ case RO_V_VSADD_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSADDU_VV:
+ RVV_VI_VV_ULOOP({
+ vd = vs2 + vs1;
+ vd |= -(vd < vs2);
+ })
+ break;
+ case RO_V_VSUB_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 - vs1; })
+ break;
+ }
+ case RO_V_VSSUB_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VAND_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAXU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAX_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VV: {
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = vs1;
+ USE(simm5);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? vs1 : vs2;
+ USE(simm5);
+ USE(rs1);
+ });
+ }
+ break;
+ }
+ case RO_V_VMSEQ_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 == vs2; })
+ break;
+ }
+ case RO_V_VMSNE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 != vs2; })
+ break;
+ }
+ case RO_V_VMSLTU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLT_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VMSLEU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VADC_VV:
+ if (instr_.RvvVM()) {
+ RVV_VI_VV_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = vs1 + vs2 + (v0 >> mpos) & 0x1;
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 << vs1; })
+ break;
+ }
+ case RO_V_VRGATHER_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ CHECK_NE(rvv_vs1_reg(), rvv_vd_reg());
+ CHECK_NE(rvv_vs2_reg(), rvv_vd_reg());
+ switch (rvv_vsew()) {
+ case E8: {
+ auto vs1 = Rvvelt<uint8_t>(rvv_vs1_reg(), i);
+ // if (i > 255) continue;
+ Rvvelt<uint8_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint8_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E16: {
+ auto vs1 = Rvvelt<uint16_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint16_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint16_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E32: {
+ auto vs1 = Rvvelt<uint32_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint32_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint32_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ default: {
+ auto vs1 = Rvvelt<uint64_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint64_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ }
+ RVV_VI_LOOP_END;
+ break;
+ }
+ default:
+ // v8::base::EmbeddedVector<char, 256> buffer;
+ // SNPrintF(trace_buf_, " ");
+ // disasm::NameConverter converter;
+ // disasm::Disassembler dasm(converter);
+ // // Use a reasonably large buffer.
+ // dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+
+ // PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ // reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ set_rvv_vstart(0);
+}
+
+void Simulator::DecodeRvvIVI() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VI: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VI_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, simm5, sat);
+ break;
+ }
+ case E16: {
+ VI_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, simm5, sat);
+ break;
+ }
+ case E32: {
+ VI_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, simm5, sat);
+ break;
+ }
+ default: {
+ VI_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, simm5, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+    case RO_V_VSADDU_VI: {
+ RVV_VI_VI_ULOOP({
+ vd = vs2 + uimm5;
+ vd |= -(vd < vs2);
+ })
+ break;
+ }
+ case RO_V_VRSUB_VI: {
+ RVV_VI_VI_LOOP({ vd = vs2 - simm5; })
+ break;
+ }
+ case RO_V_VAND_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 ^ vs2; })
+ break;
+ }
+ case RO_V_VMV_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = simm5;
+ USE(vs1);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? simm5 : vs2;
+ USE(vs1);
+ USE(rs1);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 == vs2; })
+ break;
+ case RO_V_VMSNE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 != vs2; })
+ break;
+ case RO_V_VMSLEU_VI:
+ RVV_VI_VI_ULOOP_CMP({ res = vs2 <= uimm5; })
+ break;
+ case RO_V_VMSLE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 <= simm5; })
+ break;
+ case RO_V_VMSGT_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 > simm5; })
+ break;
+ case RO_V_VSLIDEDOWN_VI: {
+ const uint8_t sh = instr_.RvvUimm5();
+ RVV_VI_GENERAL_LOOP_BASE
+
+ reg_t offset = 0;
+ bool is_valid = (i + sh) < rvv_vlmax();
+
+ if (is_valid) {
+ offset = sh;
+ }
+
+ switch (rvv_sew()) {
+ case E8: {
+ VI_XI_SLIDEDOWN_PARAMS(8, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E16: {
+ VI_XI_SLIDEDOWN_PARAMS(16, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E32: {
+ VI_XI_SLIDEDOWN_PARAMS(32, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ default: {
+ VI_XI_SLIDEDOWN_PARAMS(64, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ }
+ RVV_VI_LOOP_END
+ } break;
+ case RO_V_VSRL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 >> simm5; })
+ break;
+ case RO_V_VSLL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 << simm5; })
+ break;
+ case RO_V_VADC_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = simm5 + vs2 + (v0 >> mpos) & 0x1;
+ USE(rs1);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvIVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSADDU_VX: {
+ RVV_VI_VX_ULOOP({
+ vd = vs2 + rs1;
+ vd |= -(vd < vs2);
+ })
+ break;
+ }
+ case RO_V_VSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 - rs1; })
+ break;
+ }
+ case RO_V_VSSUB_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VRSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 - vs2; })
+ break;
+ }
+ case RO_V_VAND_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAX_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAXU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = rs1;
+ USE(vs1);
+ USE(vs2);
+ USE(simm5);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? rs1 : vs2;
+ USE(vs1);
+ USE(simm5);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 == rs1; })
+ break;
+ case RO_V_VMSNE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 != rs1; })
+ break;
+ case RO_V_VMSLT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSLEU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSGT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VMSGTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ UNIMPLEMENTED_RISCV();
+ break;
+ case RO_V_VADC_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = rs1 + vs2 + (v0 >> mpos) & 0x1;
+ USE(simm5);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 << rs1; })
+ break;
+ }
+ case RO_V_VSRL_VX: {
+ RVV_VI_VX_LOOP({ vd = int32_t(uint32_t(vs2) >> (rs1 & (xlen - 1))); })
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvMVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0: {
+ if (rvv_vs1_reg() == 0) {
+ switch (rvv_vsew()) {
+ case E8:
+ set_rd(Rvvelt<type_sew_t<8>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E16:
+ set_rd(Rvvelt<type_sew_t<16>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E32:
+ set_rd(Rvvelt<type_sew_t<32>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E64:
+ set_rd(Rvvelt<type_sew_t<64>::type>(rvv_vs2_reg(), 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ set_rvv_vstart(0);
+ SNPrintF(trace_buf_, "0x%ld", get_register(rd_reg()));
+ } else {
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ }
+ } break;
+ case RO_V_VREDMAXU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMAX:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMINU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMIN:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvMVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr_.Vs2Value() == 0x0) {
+ if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
+ switch (rvv_vsew()) {
+ case E8:
+ Rvvelt<uint8_t>(rvv_vd_reg(), 0, true) =
+ (uint8_t)get_register(rs1_reg());
+ break;
+ case E16:
+ Rvvelt<uint16_t>(rvv_vd_reg(), 0, true) =
+ (uint16_t)get_register(rs1_reg());
+ break;
+ case E32:
+ Rvvelt<uint32_t>(rvv_vd_reg(), 0, true) =
+ (uint32_t)get_register(rs1_reg());
+ break;
+ case E64:
+ Rvvelt<uint64_t>(rvv_vd_reg(), 0, true) =
+ (uint64_t)get_register(rs1_reg());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // set_rvv_vl(0);
+ }
+ set_rvv_vstart(0);
+ rvv_trace_vd();
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvFVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VFDIV_VV: {
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float vs1, float vs2) {
+ if (is_invalid_fdiv(vs1, vs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else if (vs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(vs1) == std::signbit(vs2)
+ ? std::numeric_limits<float>::infinity()
+ : -std::numeric_limits<float>::infinity());
+ } else {
+ return vs1 / vs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double vs1, double vs2) {
+ if (is_invalid_fdiv(vs1, vs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else if (vs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(vs1) == std::signbit(vs2)
+ ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity());
+ } else {
+ return vs1 / vs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ }
+ case RO_V_VFMUL_VV: {
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ }
+ case RO_V_VFUNARY0:
+ switch (instr_.Vs1Value()) {
+ case VFCVT_X_F_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ Rvvelt<int32_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<int32_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ },
+ {
+ Rvvelt<int64_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<int64_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_XU_F_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ Rvvelt<uint32_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<uint32_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ },
+ {
+ Rvvelt<uint64_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<uint64_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_F_XU_V:
+ RVV_VI_VFP_VF_LOOP({ UNIMPLEMENTED(); },
+ {
+ auto vs2_i = Rvvelt<uint32_t>(rvv_vs2_reg(), i);
+ vd = static_cast<float>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ },
+ {
+ auto vs2_i = Rvvelt<uint64_t>(rvv_vs2_reg(), i);
+ vd = static_cast<double>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_F_X_V:
+ RVV_VI_VFP_VF_LOOP({ UNIMPLEMENTED(); },
+ {
+ auto vs2_i = Rvvelt<int32_t>(rvv_vs2_reg(), i);
+ vd = static_cast<float>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ },
+ {
+ auto vs2_i = Rvvelt<int64_t>(rvv_vs2_reg(), i);
+ vd = static_cast<double>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ })
+ break;
+ case VFNCVT_F_F_W:
+ RVV_VI_VFP_CVT_SCALE(
+ { UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
+ Rvvelt<float>(rvv_vd_reg(), i, true) =
+ CanonicalizeDoubleToFloatOperation(
+ [](double drs) { return static_cast<float>(drs); },
+ vs2);
+ },
+ { ; }, { ; }, { ; }, false, (rvv_vsew() >= E16))
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ break;
+ case RO_V_VFUNARY1:
+ switch (instr_.Vs1Value()) {
+ case VFCLASS_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ int32_t& vd_i = Rvvelt<int32_t>(rvv_vd_reg(), i, true);
+ vd_i = int32_t(FclassHelper(vs2));
+ USE(fs1);
+ USE(vd);
+ },
+ {
+ int64_t& vd_i = Rvvelt<int64_t>(rvv_vd_reg(), i, true);
+ vd_i = FclassHelper(vs2);
+ USE(fs1);
+ USE(vd);
+ })
+ break;
+ default:
+ break;
+ }
+ break;
+ case RO_V_VMFEQ_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, EQ); },
+ { res = CompareFHelper(vs1, vs2, EQ); }, true)
+ } break;
+ case RO_V_VMFNE_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, NE); },
+ { res = CompareFHelper(vs1, vs2, NE); }, true)
+ } break;
+ case RO_V_VMFLT_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, LT); },
+ { res = CompareFHelper(vs1, vs2, LT); }, true)
+ } break;
+ case RO_V_VMFLE_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, LE); },
+ { res = CompareFHelper(vs1, vs2, LE); }, true)
+ } break;
+ case RO_V_VFMAX_VV: {
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); })
+ break;
+ }
+ case RO_V_VFMIN_VV: {
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); })
+ break;
+ }
+ case RO_V_VFSGNJ_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, false, false); },
+ { vd = fsgnj64(vs2, vs1, false, false); })
+ break;
+ case RO_V_VFSGNJN_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, true, false); },
+ { vd = fsgnj64(vs2, vs1, true, false); })
+ break;
+ case RO_V_VFSGNJX_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, false, true); },
+ { vd = fsgnj64(vs2, vs1, false, true); })
+ break;
+ case RO_V_VFADD_VV:
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ auto fn = [this](double frs1, double frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ case RO_V_VFSUB_VV:
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs2 - frs1;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+
+ vd = alu_out;
+ },
+ {
+ auto fn = [this](double frs1, double frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return frs2 - frs1;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvFVF() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVF);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VFSGNJ_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, false, false); },
+ { vd = fsgnj64(vs2, fs1, false, false); })
+ break;
+ case RO_V_VFSGNJN_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, true, false); },
+ { vd = fsgnj64(vs2, fs1, true, false); })
+ break;
+ case RO_V_VFSGNJX_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, false, true); },
+ { vd = fsgnj64(vs2, fs1, false, true); })
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+void Simulator::DecodeVType() {
+ switch (instr_.InstructionBits() & (kFunct3Mask | kBaseOpcodeMask)) {
+ case OP_IVV:
+ DecodeRvvIVV();
+ return;
+ case OP_FVV:
+ DecodeRvvFVV();
+ return;
+ case OP_MVV:
+ DecodeRvvMVV();
+ return;
+ case OP_IVI:
+ DecodeRvvIVI();
+ return;
+ case OP_IVX:
+ DecodeRvvIVX();
+ return;
+ case OP_FVF:
+ UNIMPLEMENTED_RISCV();
+ return;
+ case OP_MVX:
+ DecodeRvvMVX();
+ return;
+ }
+ switch (instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI: {
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax() ? avl : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ case RO_V_VSETVL: {
+ if (!(instr_.InstructionBits() & 0x40000000)) {
+ uint64_t avl;
+ set_rvv_vtype(rs2());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax() ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2
+ : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ } else {
+ DCHECK_EQ(instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ avl = instr_.Rvvuimm();
+ avl = avl <= rvv_vlmax() ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2
+ : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ break;
+ }
+ default:
+      FATAL("Error: unsupported instruction at %s:%d.", __FILE__, __LINE__);
+ }
+}
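// Editorial sketch, not part of the patch: the AVL-to-vl rule applied by the
// RO_V_VSETVLI case above, as a free function (the name is illustrative).
// The requested application vector length is clamped to VLMAX; when rs1 is
// x0 and rd is not, AVL is treated as all-ones, so vl becomes VLMAX.
uint64_t ComputeVlSketch(uint64_t avl, uint64_t vlmax) {
  return avl <= vlmax ? avl : vlmax;  // e.g. avl = 11, vlmax = 8 -> vl = 8
}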
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3473,6 +5483,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
case Instruction::kCSType:
DecodeCSType();
break;
+ case Instruction::kVType:
+ DecodeVType();
+ break;
default:
if (1) {
std::cout << "Unrecognized instruction [@pc=0x" << std::hex
@@ -3483,7 +5496,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%012" PRIxPTR " %-44s %s\n",
+ PrintF(" 0x%012" PRIxPTR " %-44s\t%s\n",
reinterpret_cast<intptr_t>(instr), buffer.begin(),
trace_buf_.begin());
}
@@ -3524,8 +5537,6 @@ void Simulator::CallInternal(Address entry) {
set_register(ra, end_sim_pc);
// Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
int64_t s0_val = get_register(s0);
int64_t s1_val = get_register(s1);
int64_t s2_val = get_register(s2);
@@ -3534,9 +5545,12 @@ void Simulator::CallInternal(Address entry) {
int64_t s5_val = get_register(s5);
int64_t s6_val = get_register(s6);
int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t s9_val = get_register(s9);
+ int64_t s10_val = get_register(s10);
+ int64_t s11_val = get_register(s11);
int64_t gp_val = get_register(gp);
int64_t sp_val = get_register(sp);
- int64_t fp_val = get_register(fp);
// Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
@@ -3549,8 +5563,11 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, callee_saved_value);
set_register(s6, callee_saved_value);
set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(s9, callee_saved_value);
+ set_register(s10, callee_saved_value);
+ set_register(s11, callee_saved_value);
set_register(gp, callee_saved_value);
- set_register(fp, callee_saved_value);
// Start the simulation.
Execute();
@@ -3564,8 +5581,11 @@ void Simulator::CallInternal(Address entry) {
CHECK_EQ(callee_saved_value, get_register(s5));
CHECK_EQ(callee_saved_value, get_register(s6));
CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(s9));
+ CHECK_EQ(callee_saved_value, get_register(s10));
+ CHECK_EQ(callee_saved_value, get_register(s11));
CHECK_EQ(callee_saved_value, get_register(gp));
- CHECK_EQ(callee_saved_value, get_register(fp));
// Restore callee-saved registers with the original value.
set_register(s0, s0_val);
@@ -3576,9 +5596,12 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, s5_val);
set_register(s6, s6_val);
set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(s9, s9_val);
+ set_register(s10, s10_val);
+ set_register(s11, s11_val);
set_register(gp, gp_val);
set_register(sp, sp_val);
- set_register(fp, fp_val);
}
intptr_t Simulator::CallImpl(Address entry, int argument_count,
@@ -3586,15 +5609,12 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
constexpr int kRegisterPassedArguments = 8;
// Set up arguments.
- // First four arguments passed in registers in both ABI's.
+  // The RISC-V calling convention passes up to eight arguments in a0-a7.
int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
if (reg_arg_count > 0) set_register(a0, arguments[0]);
if (reg_arg_count > 1) set_register(a1, arguments[1]);
if (reg_arg_count > 2) set_register(a2, arguments[2]);
if (reg_arg_count > 3) set_register(a3, arguments[3]);
-
- // Up to eight arguments passed in registers in N64 ABI.
- // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
if (reg_arg_count > 4) set_register(a4, arguments[4]);
if (reg_arg_count > 5) set_register(a5, arguments[5]);
if (reg_arg_count > 6) set_register(a6, arguments[6]);
@@ -3602,12 +5622,13 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
if (::v8::internal::FLAG_trace_sim) {
std::cout << "CallImpl: reg_arg_count = " << reg_arg_count << std::hex
- << " entry-pc (JSEntry) = 0x" << entry << " a0 (Isolate) = 0x"
- << get_register(a0) << " a1 (orig_func/new_target) = 0x"
- << get_register(a1) << " a2 (func/target) = 0x"
- << get_register(a2) << " a3 (receiver) = 0x" << get_register(a3)
- << " a4 (argc) = 0x" << get_register(a4) << " a5 (argv) = 0x"
- << get_register(a5) << std::endl;
+ << " entry-pc (JSEntry) = 0x" << entry
+ << " a0 (Isolate-root) = 0x" << get_register(a0)
+ << " a1 (orig_func/new_target) = 0x" << get_register(a1)
+ << " a2 (func/target) = 0x" << get_register(a2)
+ << " a3 (receiver) = 0x" << get_register(a3) << " a4 (argc) = 0x"
+ << get_register(a4) << " a5 (argv) = 0x" << get_register(a5)
+ << std::endl;
}
// Remaining arguments passed on stack.
diff --git a/chromium/v8/src/execution/riscv64/simulator-riscv64.h b/chromium/v8/src/execution/riscv64/simulator-riscv64.h
index 2fa40cea4e9..fce6cdca0ad 100644
--- a/chromium/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/chromium/v8/src/execution/riscv64/simulator-riscv64.h
@@ -132,8 +132,11 @@ union u32_f32 {
inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
u32_f32 a = {.f = rs1}, b = {.f = rs2};
u32_f32 res;
- res.u =
- (a.u & ~F32_SIGN) | ((((x) ? a.u : (n) ? F32_SIGN : 0) ^ b.u) & F32_SIGN);
+ res.u = (a.u & ~F32_SIGN) | ((((x) ? a.u
+ : (n) ? F32_SIGN
+ : 0) ^
+ b.u) &
+ F32_SIGN);
return res.f;
}
#define F64_SIGN ((uint64_t)1 << 63)
@@ -144,8 +147,11 @@ union u64_f64 {
inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
u64_f64 a = {.d = rs1}, b = {.d = rs2};
u64_f64 res;
- res.u =
- (a.u & ~F64_SIGN) | ((((x) ? a.u : (n) ? F64_SIGN : 0) ^ b.u) & F64_SIGN);
+ res.u = (a.u & ~F64_SIGN) | ((((x) ? a.u
+ : (n) ? F64_SIGN
+ : 0) ^
+ b.u) &
+ F64_SIGN);
return res.d;
}
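// Illustrative examples, not part of the patch: with rs1 = -2.0 and rs2 = 3.0,
// fsgnj64(rs1, rs2, false, false) keeps |rs1| and takes rs2's sign, giving
// +2.0; fsgnj64(rs1, rs2, true, false) (FSGNJN) uses the negated sign of rs2,
// giving -2.0; fsgnj64(rs1, rs2, false, true) (FSGNJX) XORs the two signs,
// also giving -2.0 here.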
@@ -299,6 +305,42 @@ class Simulator : public SimulatorBase {
kNumFPURegisters
};
+ enum VRegister {
+ v0,
+ v1,
+ v2,
+ v3,
+ v4,
+ v5,
+ v6,
+ v7,
+ v8,
+ v9,
+ v10,
+ v11,
+ v12,
+ v13,
+ v14,
+ v15,
+ v16,
+ v17,
+ v18,
+ v19,
+ v20,
+ v21,
+ v22,
+ v23,
+ v24,
+ v25,
+ v26,
+ v27,
+ v28,
+ v29,
+ v30,
+ v31,
+ kNumVRegisters
+ };
+
explicit Simulator(Isolate* isolate);
~Simulator();
@@ -312,7 +354,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
@@ -338,6 +380,59 @@ class Simulator : public SimulatorBase {
void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+ // RVV CSR
+ __int128_t get_vregister(int vreg) const;
+ inline uint64_t rvv_vlen() const { return kRvvVLEN; }
+ inline uint64_t rvv_vtype() const { return vtype_; }
+ inline uint64_t rvv_vl() const { return vl_; }
+ inline uint64_t rvv_vstart() const { return vstart_; }
+ inline uint64_t rvv_vxsat() const { return vxsat_; }
+ inline uint64_t rvv_vxrm() const { return vxrm_; }
+ inline uint64_t rvv_vcsr() const { return vcsr_; }
+ inline uint64_t rvv_vlenb() const { return vlenb_; }
+ inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
+ inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
+ inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
+
+ inline const char* rvv_sew_s() const {
+ uint32_t vsew = rvv_vsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* rvv_lmul_s() const {
+ uint32_t vlmul = rvv_vlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+  // Returns the lane size in bits: 8, 16, 32, or 64.
+ inline uint32_t rvv_sew() const {
+ DCHECK_EQ(rvv_vsew() & (~0x7), 0x0);
+ return (0x1 << rvv_vsew()) * 8;
+ }
+ inline uint64_t rvv_vlmax() const {
+ if ((rvv_vlmul() & 0b100) != 0) {
+ return (rvv_vlen() / rvv_sew()) >> (rvv_vlmul() & 0b11);
+ } else {
+ return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
+ }
+ }
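// Illustrative arithmetic, not part of the patch: in the non-fractional case
// vlmax = (VLEN << vlmul) / SEW, so with VLEN = 128, SEW = 32 and
// vlmul = 0b001 (LMUL = 2), vlmax = (128 << 1) / 32 = 8 elements.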
+
inline uint32_t get_dynamic_rounding_mode();
inline bool test_fflags_bits(uint32_t mask);
@@ -354,7 +449,7 @@ class Simulator : public SimulatorBase {
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
@@ -550,6 +645,234 @@ class Simulator : public SimulatorBase {
}
}
+ // RVV
+  // The following RVV code is based on:
+ // https://github.com/riscv/riscv-isa-sim
+ // Copyright (c) 2010-2017, The Regents of the University of California
+ // (Regents). All Rights Reserved.
+
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are met:
+ // 1. Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // 2. Redistributions in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ // 3. Neither the name of the Regents nor the
+ // names of its contributors may be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+
+ // IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+ // SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+ // ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ // REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+ // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+ // HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+ // MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ template <uint64_t N>
+ struct type_usew_t;
+ template <>
+ struct type_usew_t<8> {
+ using type = uint8_t;
+ };
+
+ template <>
+ struct type_usew_t<16> {
+ using type = uint16_t;
+ };
+
+ template <>
+ struct type_usew_t<32> {
+ using type = uint32_t;
+ };
+
+ template <>
+ struct type_usew_t<64> {
+ using type = uint64_t;
+ };
+
+ template <>
+ struct type_usew_t<128> {
+ using type = __uint128_t;
+ };
+ template <uint64_t N>
+ struct type_sew_t;
+
+ template <>
+ struct type_sew_t<8> {
+ using type = int8_t;
+ };
+
+ template <>
+ struct type_sew_t<16> {
+ using type = int16_t;
+ };
+
+ template <>
+ struct type_sew_t<32> {
+ using type = int32_t;
+ };
+
+ template <>
+ struct type_sew_t<64> {
+ using type = int64_t;
+ };
+
+ template <>
+ struct type_sew_t<128> {
+ using type = __int128_t;
+ };
+
+#define VV_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)(get_register(rs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5()); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)(instr_.RvvUimm5()); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VXI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5());
+
+#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + off);
+
+#define VI_XI_SLIDEUP_PARAMS(x, offset) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset);
+
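// Editorial note, not part of the patch: a VV-form handler is expected to
// expand one of these *_PARAMS macros inside a per-element loop and write the
// result through the vd reference, roughly (assumed shape, SEW = 32):
//   for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) {
//     VV_PARAMS(32);
//     vd = vs1 + vs2;  // element-wise add
//   }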
+ inline void rvv_trace_vd() {
+ if (::v8::internal::FLAG_trace_sim) {
+ __int128_t value = Vregister_[rvv_vd_reg()];
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
+ *(reinterpret_cast<int64_t*>(&value) + 1),
+ *reinterpret_cast<int64_t*>(&value), icount_);
+ }
+ }
+
+ inline void rvv_trace_vs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs1_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs1_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_vs2() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs2_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs2_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs2_reg())));
+ }
+ }
+ inline void rvv_trace_v0() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(v0),
+ (uint64_t)(get_vregister(v0) >> 64), (uint64_t)get_vregister(v0));
+ }
+ }
+
+ inline void rvv_trace_rs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "\n",
+ v8::internal::Registers::Name(static_cast<int>(rs1_reg())),
+ (uint64_t)(get_register(rs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_status() {
+ if (::v8::internal::FLAG_trace_sim) {
+ int i = 0;
+ for (; i < trace_buf_.length(); i++) {
+ if (trace_buf_[i] == '\0') break;
+ }
+ SNPrintF(trace_buf_.SubVector(i, trace_buf_.length()),
+ " sew:%s lmul:%s vstart:%lu vl:%lu", rvv_sew_s(), rvv_lmul_s(),
+ rvv_vstart(), rvv_vl());
+ }
+ }
+
+ template <class T>
+ T& Rvvelt(reg_t vReg, uint64_t n, bool is_write = false) {
+ CHECK_NE(rvv_sew(), 0);
+ CHECK_GT((rvv_vlen() >> 3) / sizeof(T), 0);
+ reg_t elts_per_reg = (rvv_vlen() >> 3) / (sizeof(T));
+ vReg += n / elts_per_reg;
+ n = n % elts_per_reg;
+ T* regStart = reinterpret_cast<T*>(reinterpret_cast<char*>(Vregister_) +
+ vReg * (rvv_vlen() >> 3));
+ return regStart[n];
+ }
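// Illustrative arithmetic, not part of the patch: with VLEN = 128 bits
// (16 bytes per vector register) and T = int32_t, elts_per_reg = 16 / 4 = 4,
// so element n = 5 of a group starting at v8 resolves to register v9,
// lane 5 % 4 = 1.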
+
+ inline int32_t rvv_vs1_reg() { return instr_.Vs1Value(); }
+ inline reg_t rvv_vs1() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vs2_reg() { return instr_.Vs2Value(); }
+ inline reg_t rvv_vs2() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vd_reg() { return instr_.VdValue(); }
+ inline int32_t rvv_vs3_reg() { return instr_.VdValue(); }
+ inline reg_t rvv_vd() { UNIMPLEMENTED(); }
+ inline int32_t rvv_nf() {
+ return (instr_.InstructionBits() & kRvvNfMask) >> kRvvNfShift;
+ }
+
+ inline void set_vrd() { UNIMPLEMENTED(); }
+
+ inline void set_rvv_vtype(uint64_t value, bool trace = true) {
+ vtype_ = value;
+ }
+ inline void set_rvv_vl(uint64_t value, bool trace = true) { vl_ = value; }
+ inline void set_rvv_vstart(uint64_t value, bool trace = true) {
+ vstart_ = value;
+ }
+ inline void set_rvv_vxsat(uint64_t value, bool trace = true) {
+ vxsat_ = value;
+ }
+ inline void set_rvv_vxrm(uint64_t value, bool trace = true) { vxrm_ = value; }
+ inline void set_rvv_vcsr(uint64_t value, bool trace = true) { vcsr_ = value; }
+ inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
+ vlenb_ = value;
+ }
+
template <typename T, typename Func>
inline T CanonicalizeFPUOp3(Func fn) {
DCHECK(std::is_floating_point<T>::value);
@@ -607,6 +930,22 @@ class Simulator : public SimulatorBase {
}
template <typename Func>
+ inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
+ float alu_out = fn(frs);
+    if (std::isnan(alu_out) || std::isnan(frs))
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
+ double alu_out = fn(frs);
+    if (std::isnan(alu_out) || std::isnan(frs))
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
inline float CanonicalizeFloatToDoubleOperation(Func fn) {
double alu_out = fn(frs1());
if (std::isnan(alu_out) || std::isnan(frs1()))
@@ -634,6 +973,16 @@ class Simulator : public SimulatorBase {
void DecodeCSType();
void DecodeCJType();
void DecodeCBType();
+ void DecodeVType();
+ void DecodeRvvIVV();
+ void DecodeRvvIVI();
+ void DecodeRvvIVX();
+ void DecodeRvvMVV();
+ void DecodeRvvMVX();
+ void DecodeRvvFVV();
+ void DecodeRvvFVF();
+ bool DecodeRvvVL();
+ bool DecodeRvvVS();
// Used for breakpoints and traps.
void SoftwareInterrupt();
@@ -700,6 +1049,10 @@ class Simulator : public SimulatorBase {
// Floating-point control and status register.
uint32_t FCSR_;
+ // RVV registers
+ __int128_t Vregister_[kNumVRegisters];
+  static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "mismatched vlen");
+ uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
// Simulator support.
// Allocate 1MB for stack.
size_t stack_size_;
@@ -707,7 +1060,7 @@ class Simulator : public SimulatorBase {
bool pc_modified_;
int64_t icount_;
int break_count_;
- base::EmbeddedVector<char, 128> trace_buf_;
+ base::EmbeddedVector<char, 256> trace_buf_;
// Debugger input.
char* last_debugger_input_;
@@ -820,7 +1173,6 @@ class Simulator : public SimulatorBase {
LocalMonitor local_monitor_;
GlobalMonitor::LinkedAddress global_monitor_thread_;
};
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/execution/runtime-profiler.cc b/chromium/v8/src/execution/runtime-profiler.cc
index 5ce45f43c2f..4d710c5aaae 100644
--- a/chromium/v8/src/execution/runtime-profiler.cc
+++ b/chromium/v8/src/execution/runtime-profiler.cc
@@ -20,24 +20,11 @@
namespace v8 {
namespace internal {
-// Number of times a function has to be seen on the stack before it is
-// optimized.
-static const int kProfilerTicksBeforeOptimization = 3;
-
-// The number of ticks required for optimizing a function increases with
-// the size of the bytecode. This is in addition to the
-// kProfilerTicksBeforeOptimization required for any function.
-static const int kBytecodeSizeAllowancePerTick = 1100;
-
// Maximum size in bytes of generate code for a function to allow OSR.
static const int kOSRBytecodeSizeAllowanceBase = 119;
static const int kOSRBytecodeSizeAllowancePerTick = 44;
-// Maximum size in bytes of generated code for a function to be optimized
-// the very first time it is seen on the stack.
-static const int kMaxBytecodeSizeForEarlyOpt = 81;
-
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
@@ -191,7 +178,7 @@ namespace {
bool ShouldOptimizeAsSmallFunction(int bytecode_size, int ticks,
bool any_ic_changed,
bool active_tier_is_turboprop) {
- if (any_ic_changed || bytecode_size >= kMaxBytecodeSizeForEarlyOpt)
+ if (any_ic_changed || bytecode_size >= FLAG_max_bytecode_size_for_early_opt)
return false;
return true;
}
@@ -209,8 +196,8 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
int ticks = function.feedback_vector().profiler_ticks();
bool active_tier_is_turboprop = function.ActiveTierIsMidtierTurboprop();
int ticks_for_optimization =
- kProfilerTicksBeforeOptimization +
- (bytecode.length() / kBytecodeSizeAllowancePerTick);
+ FLAG_ticks_before_optimization +
+ (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (ShouldOptimizeAsSmallFunction(bytecode.length(), ticks,
@@ -227,7 +214,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- bytecode.length(), kMaxBytecodeSizeForEarlyOpt);
+ bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
}
}
return OptimizationReason::kDoNotOptimize;
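// Illustrative arithmetic, not part of the patch (assuming the new flags
// default to the removed constants, i.e. 3 ticks plus one tick per 1100
// bytecode bytes): a 2200-byte function needs 3 + 2200 / 1100 = 5 profiler
// ticks before it is considered hot and stable.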
@@ -250,7 +237,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
MarkCandidatesForOptimizationScope scope(this);
JSFunction function = frame->function();
- CodeKind code_kind = function.GetActiveTier();
+ CodeKind code_kind = function.GetActiveTier().value();
DCHECK(function.shared().is_compiled());
DCHECK(function.shared().IsInterpreted());
diff --git a/chromium/v8/src/execution/s390/simulator-s390.cc b/chromium/v8/src/execution/s390/simulator-s390.cc
index 88a8cb41211..4d386e65b83 100644
--- a/chromium/v8/src/execution/s390/simulator-s390.cc
+++ b/chromium/v8/src/execution/s390/simulator-s390.cc
@@ -109,7 +109,6 @@ bool S390Debugger::GetValue(const char* desc, intptr_t* value) {
1;
}
}
- return false;
}
bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -758,8 +757,14 @@ void Simulator::EvalTableInit() {
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */ \
V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
+ V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
+ V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
+ V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
+      V(vleb, VLEB, 0xE700)   /* type = VRX VECTOR LOAD ELEMENT (8) */        \
+ V(vleh, VLEH, 0xE701) /* type = VRX VECTOR LOAD ELEMENT (16) */ \
V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(vleg, VLEG, 0xE702) /* type = VRX VECTOR LOAD ELEMENT (64) */ \
V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
@@ -1775,50 +1780,50 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
-uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+uint32_t Simulator::ReadWU(intptr_t addr) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
return *ptr;
}
-int64_t Simulator::ReadW64(intptr_t addr, Instruction* instr) {
+int64_t Simulator::ReadW64(intptr_t addr) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
return *ptr;
}
-int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+int32_t Simulator::ReadW(intptr_t addr) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
}
-void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, uint32_t value) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, int32_t value) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr = value;
return;
}
-uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+uint16_t Simulator::ReadHU(intptr_t addr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
-int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+int16_t Simulator::ReadH(intptr_t addr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
-void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, uint16_t value) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, int16_t value) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
@@ -2036,7 +2041,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -2076,7 +2080,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -2090,7 +2093,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2205,7 +2207,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_register(r2, result_buffer);
}
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on
+ // non-simulator builds for arm/arm64), thus we expect that the slow
+ // path will be called. And since the slow path passes the arguments
+ // as a `const FunctionCallbackInfo<Value>&` (which is a GP argument),
+ // the call is made correctly.
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
@@ -3187,12 +3200,57 @@ EVALUATE(VLR) {
return length;
}
+EVALUATE(VSTEB) {
+ DCHECK_OPCODE(VSTEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = get_simd_register_by_lane<int8_t>(r1, m3);
+ WriteB(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEH) {
+ DCHECK_OPCODE(VSTEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = get_simd_register_by_lane<int16_t>(r1, m3);
+ WriteH(addr, value);
+ return length;
+}
+
EVALUATE(VSTEF) {
DCHECK_OPCODE(VSTEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
int32_t value = get_simd_register_by_lane<int32_t>(r1, m3);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEG) {
+ DCHECK_OPCODE(VSTEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int64_t value = get_simd_register_by_lane<int64_t>(r1, m3);
+ WriteDW(addr, value);
+ return length;
+}
+
+EVALUATE(VLEB) {
+ DCHECK_OPCODE(VLEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = ReadB(addr);
+ set_simd_register_by_lane<int8_t>(r1, m3, value);
+ return length;
+}
+
+EVALUATE(VLEH) {
+ DCHECK_OPCODE(VLEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = ReadH(addr);
+ set_simd_register_by_lane<int16_t>(r1, m3, value);
return length;
}
@@ -3200,11 +3258,20 @@ EVALUATE(VLEF) {
DCHECK_OPCODE(VLEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_simd_register_by_lane<int32_t>(r1, m3, value);
return length;
}
+EVALUATE(VLEG) {
+ DCHECK_OPCODE(VLEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint64_t value = ReadDW(addr);
+ set_simd_register_by_lane<uint64_t>(r1, m3, value);
+ return length;
+}
+
// TODO(john): unify most fp binary operations
template <class T, class Operation>
inline static void VectorBinaryOp(Simulator* sim, int dst, int src1, int src2,
@@ -4368,20 +4435,35 @@ EVALUATE(VFMAX) {
#undef CASE
template <class S, class D, class Operation>
-void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
+void VectorFPCompare(Simulator* sim, int dst, int src1, int src2, int m6,
Operation op) {
static_assert(sizeof(S) == sizeof(D),
"Expect input type size == output type size");
+ bool some_zero = false;
+ bool all_zero = true;
FOR_EACH_LANE(i, D) {
S src1_val = sim->get_simd_register_by_lane<S>(src1, i);
S src2_val = sim->get_simd_register_by_lane<S>(src2, i);
D value = op(src1_val, src2_val);
sim->set_simd_register_by_lane<D>(dst, i, value);
+ if (value) {
+ all_zero = false;
+ } else {
+ some_zero = true;
+ }
+ }
+ // TODO(miladfarca) implement other conditions.
+ if (m6) {
+ if (all_zero) {
+ sim->condition_reg_ = CC_OF;
+ } else if (some_zero) {
+ sim->condition_reg_ = 0x04;
+ }
}
}
-#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
- VectorFPCompare<S, D>(this, r1, r2, r3, \
+#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
+ VectorFPCompare<S, D>(this, r1, r2, r3, m6, \
[](S a, S b) { return (a op b) ? -1 : 0; });
#define VECTOR_FP_COMPARE(op) \
@@ -4415,7 +4497,6 @@ void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
EVALUATE(VFCE) {
DCHECK_OPCODE(VFCE);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
- USE(m6);
VECTOR_FP_COMPARE(==)
return length;
}
@@ -4578,7 +4659,7 @@ EVALUATE(L) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -4727,7 +4808,7 @@ EVALUATE(LGF) {
DCHECK_OPCODE(LGF);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(addr));
set_register(r1, mem_val);
return length;
}
@@ -4739,7 +4820,7 @@ EVALUATE(ST) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- WriteW(addr, r1_val, instr);
+ WriteW(addr, r1_val);
return length;
}
@@ -4757,7 +4838,7 @@ EVALUATE(STY) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
uint32_t value = get_low_register<uint32_t>(r1);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
return length;
}
@@ -4765,7 +4846,7 @@ EVALUATE(LY) {
DCHECK_OPCODE(LY);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- uint32_t mem_val = ReadWU(addr, instr);
+ uint32_t mem_val = ReadWU(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -5166,7 +5247,7 @@ EVALUATE(STH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t mem_addr = b2_val + x2_val + d2_val;
- WriteH(mem_addr, r1_val, instr);
+ WriteH(mem_addr, r1_val);
return length;
}
@@ -5248,7 +5329,7 @@ EVALUATE(LH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = x2_val + b2_val + d2_val;
- int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(mem_addr));
set_low_register(r1, result);
return length;
}
@@ -5266,7 +5347,7 @@ EVALUATE(AH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5285,7 +5366,7 @@ EVALUATE(SH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5303,7 +5384,7 @@ EVALUATE(MH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
alu_out = r1_val * mem_val;
set_low_register(r1, alu_out);
@@ -5341,7 +5422,7 @@ EVALUATE(N) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val & mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5356,7 +5437,7 @@ EVALUATE(CL) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<uint32_t>(r1_val, mem_val);
return length;
}
@@ -5368,7 +5449,7 @@ EVALUATE(O) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val | mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5383,7 +5464,7 @@ EVALUATE(X) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val ^ mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5398,7 +5479,7 @@ EVALUATE(C) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<int32_t>(r1_val, mem_val);
return length;
}
@@ -5410,7 +5491,7 @@ EVALUATE(A) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5428,7 +5509,7 @@ EVALUATE(S) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5446,7 +5527,7 @@ EVALUATE(M) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -5511,7 +5592,7 @@ EVALUATE(STE) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
@@ -5520,7 +5601,7 @@ EVALUATE(MS) {
DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, r1_val * mem_val);
return length;
@@ -5733,7 +5814,7 @@ EVALUATE(STM) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(rb_val + offset + 4 * i, value, instr);
+ WriteW(rb_val + offset + 4 * i, value);
}
return length;
}
@@ -5793,7 +5874,7 @@ EVALUATE(LM) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+ int32_t value = ReadW(rb_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -9254,7 +9335,7 @@ EVALUATE(LT) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_low_register(r1, value);
SetS390ConditionCode<int32_t>(value, 0);
return length;
@@ -9267,7 +9348,7 @@ EVALUATE(LGH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadH(addr));
set_register(r1, mem_val);
return length;
}
@@ -9279,7 +9360,7 @@ EVALUATE(LLGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+ uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr));
set_register(r1, mem_val);
return length;
}
@@ -9298,7 +9379,7 @@ EVALUATE(AGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out += mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9313,7 +9394,7 @@ EVALUATE(SGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out -= mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9338,8 +9419,7 @@ EVALUATE(MSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1);
int64_t product = r1_val * mem_val;
set_register(r1, product);
@@ -9353,8 +9433,7 @@ EVALUATE(DSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1 + 1);
int64_t quotient = r1_val / mem_val;
int64_t remainder = r1_val % mem_val;
@@ -9369,7 +9448,7 @@ EVALUATE(LRVG) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int64_t mem_val = ReadW64(mem_addr, instr);
+ int64_t mem_val = ReadW64(mem_addr);
set_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9380,7 +9459,7 @@ EVALUATE(LRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int32_t mem_val = ReadW(mem_addr, instr);
+ int32_t mem_val = ReadW(mem_addr);
set_low_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9392,7 +9471,7 @@ EVALUATE(LRVH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int16_t mem_val = ReadH(mem_addr, instr);
+ int16_t mem_val = ReadH(mem_addr);
int32_t result = ByteReverse(mem_val) & 0x0000FFFF;
result |= r1_val & 0xFFFF0000;
set_low_register(r1, result);
@@ -9478,7 +9557,7 @@ EVALUATE(STRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- WriteW(mem_addr, ByteReverse(r1_val), instr);
+ WriteW(mem_addr, ByteReverse(r1_val));
return length;
}
@@ -9501,7 +9580,7 @@ EVALUATE(STRVH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
int16_t result = static_cast<int16_t>(r1_val >> 16);
- WriteH(mem_addr, ByteReverse(result), instr);
+ WriteH(mem_addr, ByteReverse(result));
return length;
}
@@ -9517,7 +9596,7 @@ EVALUATE(MSY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, mem_val * r1_val);
return length;
@@ -9529,7 +9608,7 @@ EVALUATE(MSC) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t result64 =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9548,7 +9627,7 @@ EVALUATE(NY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out &= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9561,7 +9640,7 @@ EVALUATE(CLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
SetS390ConditionCode<uint32_t>(alu_out, mem_val);
return length;
}
@@ -9572,7 +9651,7 @@ EVALUATE(OY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out |= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9585,7 +9664,7 @@ EVALUATE(XY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out ^= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9598,7 +9677,7 @@ EVALUATE(CY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
SetS390ConditionCode<int32_t>(alu_out, mem_val);
return length;
}
@@ -9609,7 +9688,7 @@ EVALUATE(AY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
alu_out += mem_val;
@@ -9625,7 +9704,7 @@ EVALUATE(SY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
alu_out -= mem_val;
@@ -9641,7 +9720,7 @@ EVALUATE(MFY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9659,7 +9738,7 @@ EVALUATE(ALY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out += mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9672,7 +9751,7 @@ EVALUATE(SLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out -= mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9687,7 +9766,7 @@ EVALUATE(STHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
uint16_t value = get_low_register<uint32_t>(r1);
- WriteH(addr, value, instr);
+ WriteH(addr, value);
return length;
}
@@ -9759,7 +9838,7 @@ EVALUATE(LHY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(addr));
set_low_register(r1, result);
return length;
}
@@ -9777,8 +9856,7 @@ EVALUATE(AHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val + mem_val;
@@ -9796,8 +9874,7 @@ EVALUATE(SHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val - mem_val;
@@ -9919,7 +9996,7 @@ EVALUATE(LLGH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_register(r1, mem_val);
return length;
}
@@ -9931,7 +10008,7 @@ EVALUATE(LLH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_low_register(r1, mem_val);
return length;
}
@@ -9942,7 +10019,7 @@ EVALUATE(ML) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t product =
static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(mem_val);
@@ -9960,7 +10037,7 @@ EVALUATE(DL) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t quotient =
static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
@@ -10089,7 +10166,7 @@ EVALUATE(MVHI) {
DECODE_SIL_INSTRUCTION(b1, d1, i2);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
intptr_t src_addr = b1_val + d1;
- WriteW(src_addr, i2, instr);
+ WriteW(src_addr, i2);
return length;
}
@@ -10461,12 +10538,12 @@ EVALUATE(ASI) {
int d1_val = d1;
intptr_t addr = b1_val + d1_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
int32_t alu_out = mem_val + i2;
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
- WriteW(addr, alu_out, instr);
+ WriteW(addr, alu_out);
return length;
}
@@ -10545,7 +10622,7 @@ EVALUATE(STMY) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(b2_val + offset + 4 * i, value, instr);
+ WriteW(b2_val + offset + 4 * i, value);
}
return length;
}
@@ -10571,7 +10648,7 @@ EVALUATE(LMY) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+ int32_t value = ReadW(b2_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -11232,7 +11309,7 @@ EVALUATE(STEY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
diff --git a/chromium/v8/src/execution/s390/simulator-s390.h b/chromium/v8/src/execution/s390/simulator-s390.h
index 4c1b0a49244..cbe628691c5 100644
--- a/chromium/v8/src/execution/s390/simulator-s390.h
+++ b/chromium/v8/src/execution/s390/simulator-s390.h
@@ -280,17 +280,17 @@ class Simulator : public SimulatorBase {
inline void WriteB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
- inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
- inline int16_t ReadH(intptr_t addr, Instruction* instr);
+ inline uint16_t ReadHU(intptr_t addr);
+ inline int16_t ReadH(intptr_t addr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
-
- inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
- inline int32_t ReadW(intptr_t addr, Instruction* instr);
- inline int64_t ReadW64(intptr_t addr, Instruction* instr);
- inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
- inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+ inline void WriteH(intptr_t addr, uint16_t value);
+ inline void WriteH(intptr_t addr, int16_t value);
+
+ inline uint32_t ReadWU(intptr_t addr);
+ inline int32_t ReadW(intptr_t addr);
+ inline int64_t ReadW64(intptr_t addr);
+ inline void WriteW(intptr_t addr, uint32_t value);
+ inline void WriteW(intptr_t addr, int32_t value);
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
diff --git a/chromium/v8/src/execution/simulator-base.h b/chromium/v8/src/execution/simulator-base.h
index 9edc60a3f38..90e94416096 100644
--- a/chromium/v8/src/execution/simulator-base.h
+++ b/chromium/v8/src/execution/simulator-base.h
@@ -88,9 +88,9 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
- // The MIPS64 and RISCV64 calling convention is to sign extend all values,
- // even unsigned ones.
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
+  // The MIPS64, LOONG64 and RISCV64 calling conventions sign-extend all
+  // values, even unsigned ones.
using signed_t = typename std::make_signed<T>::type;
return static_cast<intptr_t>(static_cast<signed_t>(arg));
#else
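// Illustrative, not part of the patch: on these targets a uint32_t argument of
// 0x80000000 is converted to int32_t (-2147483648) and then sign-extended to
// intptr_t, reaching the simulated argument register as 0xFFFFFFFF80000000.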
diff --git a/chromium/v8/src/execution/simulator.h b/chromium/v8/src/execution/simulator.h
index 3b824e76325..5bf9d4612e6 100644
--- a/chromium/v8/src/execution/simulator.h
+++ b/chromium/v8/src/execution/simulator.h
@@ -24,6 +24,8 @@
#include "src/execution/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/simulator-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/simulator-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/execution/thread-local-top.h b/chromium/v8/src/execution/thread-local-top.h
index f903747aeb6..236beda8a05 100644
--- a/chromium/v8/src/execution/thread-local-top.h
+++ b/chromium/v8/src/execution/thread-local-top.h
@@ -5,6 +5,9 @@
#ifndef V8_EXECUTION_THREAD_LOCAL_TOP_H_
#define V8_EXECUTION_THREAD_LOCAL_TOP_H_
+#include "include/v8-callbacks.h"
+#include "include/v8-exception.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/execution/thread-id.h"
#include "src/objects/contexts.h"
@@ -63,8 +66,10 @@ class ThreadLocalTop {
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
Address try_catch_handler_address() {
- return reinterpret_cast<Address>(
- v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
+ if (try_catch_handler_) {
+ return try_catch_handler_->JSStackComparableAddressPrivate();
+ }
+ return kNullAddress;
}
// Call depth represents nested v8 api calls. Instead of storing the nesting
diff --git a/chromium/v8/src/execution/v8threads.cc b/chromium/v8/src/execution/v8threads.cc
index 06575e9c646..9fb8f1c30cd 100644
--- a/chromium/v8/src/execution/v8threads.cc
+++ b/chromium/v8/src/execution/v8threads.cc
@@ -4,6 +4,7 @@
#include "src/execution/v8threads.h"
+#include "include/v8-locker.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/execution.h"
@@ -19,7 +20,7 @@ namespace {
// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
-base::Atomic32 g_locker_was_ever_used_ = 0;
+base::AtomicWord g_locker_was_ever_used_ = 0;
} // namespace
@@ -52,8 +53,12 @@ bool Locker::IsLocked(v8::Isolate* isolate) {
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
-bool Locker::IsActive() {
- return !!base::Relaxed_Load(&g_locker_was_ever_used_);
+// static
+bool Locker::IsActive() { return WasEverUsed(); }
+
+// static
+bool Locker::WasEverUsed() {
+ return base::Relaxed_Load(&g_locker_was_ever_used_) != 0;
}
Locker::~Locker() {
diff --git a/chromium/v8/src/execution/vm-state.h b/chromium/v8/src/execution/vm-state.h
index 9621bee421d..d903b222ee2 100644
--- a/chromium/v8/src/execution/vm-state.h
+++ b/chromium/v8/src/execution/vm-state.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_VM_STATE_H_
#define V8_EXECUTION_VM_STATE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/logging/counters-scopes.h"
diff --git a/chromium/v8/src/extensions/cputracemark-extension.cc b/chromium/v8/src/extensions/cputracemark-extension.cc
index 029ad0f3cb7..881ca3b1dcb 100644
--- a/chromium/v8/src/extensions/cputracemark-extension.cc
+++ b/chromium/v8/src/extensions/cputracemark-extension.cc
@@ -4,6 +4,9 @@
#include "src/extensions/cputracemark-extension.h"
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/extensions/cputracemark-extension.h b/chromium/v8/src/extensions/cputracemark-extension.h
index 362bdcebd3c..4eca092d4b7 100644
--- a/chromium/v8/src/extensions/cputracemark-extension.h
+++ b/chromium/v8/src/extensions/cputracemark-extension.h
@@ -5,10 +5,14 @@
#ifndef V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
#define V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class CpuTraceMarkExtension : public v8::Extension {
diff --git a/chromium/v8/src/extensions/externalize-string-extension.cc b/chromium/v8/src/extensions/externalize-string-extension.cc
index 755023d8d65..dab8c224c4e 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.cc
+++ b/chromium/v8/src/extensions/externalize-string-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/externalize-string-extension.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/chromium/v8/src/extensions/externalize-string-extension.h b/chromium/v8/src/extensions/externalize-string-extension.h
index 8d08a7474a2..8fce62191d0 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.h
+++ b/chromium/v8/src/extensions/externalize-string-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class ExternalizeStringExtension : public v8::Extension {
diff --git a/chromium/v8/src/extensions/gc-extension.cc b/chromium/v8/src/extensions/gc-extension.cc
index 6f1c601d8da..cda90bd507c 100644
--- a/chromium/v8/src/extensions/gc-extension.cc
+++ b/chromium/v8/src/extensions/gc-extension.cc
@@ -4,7 +4,11 @@
#include "src/extensions/gc-extension.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/base/platform/platform.h"
#include "src/execution/isolate.h"
#include "src/heap/heap.h"
diff --git a/chromium/v8/src/extensions/gc-extension.h b/chromium/v8/src/extensions/gc-extension.h
index c5750c5e801..f38a946b9fa 100644
--- a/chromium/v8/src/extensions/gc-extension.h
+++ b/chromium/v8/src/extensions/gc-extension.h
@@ -5,10 +5,15 @@
#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
#define V8_EXTENSIONS_GC_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
+#include "include/v8-local-handle.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
// Provides garbage collection on invoking |fun_name|(options), where
diff --git a/chromium/v8/src/extensions/ignition-statistics-extension.cc b/chromium/v8/src/extensions/ignition-statistics-extension.cc
index 93ceeeeddf1..454a85f50a5 100644
--- a/chromium/v8/src/extensions/ignition-statistics-extension.cc
+++ b/chromium/v8/src/extensions/ignition-statistics-extension.cc
@@ -4,6 +4,8 @@
#include "src/extensions/ignition-statistics-extension.h"
+#include "include/v8-template.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecodes.h"
@@ -27,9 +29,10 @@ const char* const IgnitionStatisticsExtension::kSource =
void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
- ->interpreter()
- ->GetDispatchCountersObject());
+ args.GetReturnValue().Set(
+ Utils::ToLocal(reinterpret_cast<Isolate*>(args.GetIsolate())
+ ->interpreter()
+ ->GetDispatchCountersObject()));
}
} // namespace internal
diff --git a/chromium/v8/src/extensions/ignition-statistics-extension.h b/chromium/v8/src/extensions/ignition-statistics-extension.h
index fee55f6128a..deffe4c9156 100644
--- a/chromium/v8/src/extensions/ignition-statistics-extension.h
+++ b/chromium/v8/src/extensions/ignition-statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class IgnitionStatisticsExtension : public v8::Extension {
diff --git a/chromium/v8/src/extensions/statistics-extension.cc b/chromium/v8/src/extensions/statistics-extension.cc
index 1911dfc39e8..976a97ad73d 100644
--- a/chromium/v8/src/extensions/statistics-extension.cc
+++ b/chromium/v8/src/extensions/statistics-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/statistics-extension.h"
+#include "include/v8-template.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
diff --git a/chromium/v8/src/extensions/statistics-extension.h b/chromium/v8/src/extensions/statistics-extension.h
index 4c53cbfdea4..f2b0256ee2e 100644
--- a/chromium/v8/src/extensions/statistics-extension.h
+++ b/chromium/v8/src/extensions/statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class StatisticsExtension : public v8::Extension {
diff --git a/chromium/v8/src/extensions/trigger-failure-extension.cc b/chromium/v8/src/extensions/trigger-failure-extension.cc
index 44c07fbc001..2c66d036a25 100644
--- a/chromium/v8/src/extensions/trigger-failure-extension.cc
+++ b/chromium/v8/src/extensions/trigger-failure-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/trigger-failure-extension.h"
+#include "include/v8-template.h"
#include "src/base/logging.h"
#include "src/common/checks.h"
diff --git a/chromium/v8/src/extensions/trigger-failure-extension.h b/chromium/v8/src/extensions/trigger-failure-extension.h
index e2cfac1eb3f..22039ccb276 100644
--- a/chromium/v8/src/extensions/trigger-failure-extension.h
+++ b/chromium/v8/src/extensions/trigger-failure-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class TriggerFailureExtension : public v8::Extension {
diff --git a/chromium/v8/src/extensions/vtunedomain-support-extension.cc b/chromium/v8/src/extensions/vtunedomain-support-extension.cc
index 9a7715bb237..fcf2aa6961c 100644
--- a/chromium/v8/src/extensions/vtunedomain-support-extension.cc
+++ b/chromium/v8/src/extensions/vtunedomain-support-extension.cc
@@ -3,9 +3,13 @@
// found in the LICENSE file.
#include "src/extensions/vtunedomain-support-extension.h"
+
#include <string>
#include <vector>
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/extensions/vtunedomain-support-extension.h b/chromium/v8/src/extensions/vtunedomain-support-extension.h
index 4640d0dfa5e..cccfd742235 100644
--- a/chromium/v8/src/extensions/vtunedomain-support-extension.h
+++ b/chromium/v8/src/extensions/vtunedomain-support-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
#define V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/third_party/vtune/vtuneapi.h"
@@ -19,6 +19,10 @@
#define TASK_END_FAILED 1 << 6
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class VTuneDomainSupportExtension : public v8::Extension {
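The extension headers above all follow the same include-what-you-use pattern: the monolithic include/v8.h is replaced by include/v8-extension.h plus a forward declaration of FunctionCallbackInfo. A hedged sketch of the same pattern for a hypothetical extension header (MyExtension, its source string, and the guard name are invented; the v8 headers and base class are the ones used in the hunks above):

    #ifndef V8_EXTENSIONS_MY_EXTENSION_H_
    #define V8_EXTENSIONS_MY_EXTENSION_H_

    #include "include/v8-extension.h"  // Only the Extension base class is needed.

    namespace v8 {

    template <typename T>
    class FunctionCallbackInfo;  // Forward declaration instead of include/v8.h.

    namespace internal {

    class MyExtension : public v8::Extension {
     public:
      MyExtension() : v8::Extension("v8/my", "native function myMark();") {}
      v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
          v8::Isolate* isolate, v8::Local<v8::String> name) override;
      static void MyMark(const v8::FunctionCallbackInfo<v8::Value>& info);
    };

    }  // namespace internal
    }  // namespace v8

    #endif  // V8_EXTENSIONS_MY_EXTENSION_H_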
diff --git a/chromium/v8/src/flags/flag-definitions.h b/chromium/v8/src/flags/flag-definitions.h
index 312d17b52f3..12ecfc9d453 100644
--- a/chromium/v8/src/flags/flag-definitions.h
+++ b/chromium/v8/src/flags/flag-definitions.h
@@ -175,6 +175,20 @@ struct MaybeBoolFlag {
#define V8_HEAP_SANDBOX_BOOL false
#endif
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL true
+#else
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
+#endif
+
+// D8's MultiMappedAllocator is only available on Linux, and only if the virtual
+// memory cage is not enabled.
+#if V8_OS_LINUX && !V8_VIRTUAL_MEMORY_CAGE_BOOL
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE true
+#else
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE false
+#endif
+
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
#else
@@ -183,7 +197,7 @@ struct MaybeBoolFlag {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
@@ -197,6 +211,13 @@ struct MaybeBoolFlag {
#define ENABLE_SPARKPLUG_BY_DEFAULT false
#endif
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+// Must be enabled on M1.
+#define MUST_WRITE_PROTECT_CODE_MEMORY true
+#else
+#define MUST_WRITE_PROTECT_CODE_MEMORY false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -278,7 +299,6 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
V(harmony_import_assertions, "harmony import assertions") \
@@ -299,10 +319,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#define HARMONY_STAGED(V) \
HARMONY_STAGED_BASE(V) \
V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
- V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
- V(harmony_intl_locale_info, "Intl locale info") \
- V(harmony_intl_more_timezone, \
- "Extend Intl.DateTimeFormat timeZoneName Option")
+  V(harmony_intl_enumeration, "Intl Enumeration API")      \
+ V(harmony_intl_locale_info, "Intl locale info")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
@@ -319,10 +337,13 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
+ V(harmony_intl_more_timezone, \
+ "Extend Intl.DateTimeFormat timeZoneName Option")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -487,13 +508,16 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
+DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
#endif
#if V8_SHORT_BUILTIN_CALLS
DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
#endif
+#if !MUST_WRITE_PROTECT_CODE_MEMORY
+DEFINE_WEAK_VALUE_IMPLICATION(future, write_protect_code_memory, false)
+#endif
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -519,9 +543,9 @@ DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
-DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
+DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
-DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
+DEFINE_IMPLICATION(trace_compilation_dependencies, trace_deopt_verbose)
#ifdef V8_ALLOCATION_SITE_TRACKING
#define V8_ALLOCATION_SITE_TRACKING_BOOL true
@@ -567,8 +591,17 @@ DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+DEFINE_INT(ticks_before_optimization, 3,
+ "the number of times we have to go through the interrupt budget "
+ "before considering this function for optimization")
+DEFINE_INT(bytecode_size_allowance_per_tick, 1100,
+ "increases the number of ticks required for optimization by "
+ "bytecode.length/X")
DEFINE_INT(interrupt_budget, 132 * KB,
"interrupt budget which should be used for the profiler counter")
+DEFINE_INT(
+ max_bytecode_size_for_early_opt, 81,
+ "Maximum bytecode length for a function to be optimized on the first tick")
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
@@ -695,19 +728,21 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_BOOL(block_concurrent_recompilation, false,
- "block queued jobs until released")
-DEFINE_BOOL(concurrent_inlining, false,
+DEFINE_BOOL(concurrent_inlining, true,
"run optimizing compiler's inlining phase on a separate thread")
-DEFINE_BOOL(stress_concurrent_inlining, false,
- "makes concurrent inlining more likely to trigger in tests")
+DEFINE_BOOL(
+ stress_concurrent_inlining, false,
+ "create additional concurrent optimization jobs but throw away result")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
+DEFINE_BOOL(stress_concurrent_inlining_attach_code, false,
+ "create additional concurrent optimization jobs")
+DEFINE_IMPLICATION(stress_concurrent_inlining_attach_code,
+ stress_concurrent_inlining)
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
-DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
DEFINE_BOOL(trace_heap_broker_verbose, false,
"trace the heap broker verbosely (all reports)")
DEFINE_BOOL(trace_heap_broker_memory, false,
@@ -882,15 +917,6 @@ DEFINE_BOOL(optimize_for_size, false,
"speed")
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
-#else
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS true
-#endif
-DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
- "Enable mitigations for executing untrusted code")
-#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-
// Flags for WebAssembly.
#if V8_ENABLE_WEBASSEMBLY
@@ -929,6 +955,9 @@ DEFINE_BOOL(wasm_tier_up, true,
"have an effect)")
DEFINE_BOOL(wasm_dynamic_tiering, false,
"enable dynamic tier up to the optimizing compiler")
+DEFINE_INT(
+ wasm_caching_threshold, 1000000,
+ "the amount of wasm top tier code that triggers the next caching event")
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
@@ -988,7 +1017,6 @@ DEFINE_STRING(dump_wasm_module_path, nullptr,
FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
#undef DECL_WASM_FLAG
-DEFINE_IMPLICATION(experimental_wasm_gc_experiments, experimental_wasm_gc)
DEFINE_IMPLICATION(experimental_wasm_gc, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(experimental_wasm_typed_funcref, experimental_wasm_reftypes)
@@ -1015,6 +1043,9 @@ DEFINE_NEG_NEG_IMPLICATION(wasm_bounds_checks, wasm_enforce_bounds_checks)
DEFINE_BOOL(wasm_math_intrinsics, true,
"intrinsify some Math imports into wasm")
+DEFINE_BOOL(
+ wasm_inlining, false,
+ "enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
@@ -1161,7 +1192,12 @@ DEFINE_INT(scavenge_task_trigger, 80,
DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
+#if MUST_WRITE_PROTECT_CODE_MEMORY
+DEFINE_BOOL_READONLY(write_protect_code_memory, true,
+ "write protect code memory")
+#else
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
+#endif
#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
#else
@@ -1580,8 +1616,9 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -1782,6 +1819,8 @@ DEFINE_BOOL_READONLY(minor_mc, false,
//
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
+DEFINE_BOOL(print_flag_values, false, "Print all flag values of V8")
+
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
DEFINE_BOOL(slow_histograms, false,
"Enable slow histograms with more overhead.")
@@ -1796,7 +1835,7 @@ DEFINE_BOOL(mock_arraybuffer_allocator, false,
DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
"Memory limit for mock ArrayBuffer allocator used to simulate "
"OOM for testing.")
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DEFINE_BOOL(multi_mapped_mock_allocator, false,
"Use a multi-mapped mock ArrayBuffer allocator for testing.")
#endif
@@ -1963,7 +2002,9 @@ DEFINE_PERF_PROF_BOOL(
"Remove the perf file right after creating it (for testing only).")
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
// TODO(v8:8462) Remove implication once perf supports remapping.
+#if !MUST_WRITE_PROTECT_CODE_MEMORY
DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
+#endif
#if V8_ENABLE_WEBASSEMBLY
DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2118,6 +2159,7 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, stress_concurrent_allocation)
DEFINE_BOOL(verify_predictable, false,
"this mode is used for checking that V8 behaves predictably")
+DEFINE_IMPLICATION(verify_predictable, predictable)
DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
diff --git a/chromium/v8/src/flags/flags.cc b/chromium/v8/src/flags/flags.cc
index 4bf401b73c2..66ad2974d03 100644
--- a/chromium/v8/src/flags/flags.cc
+++ b/chromium/v8/src/flags/flags.cc
@@ -9,6 +9,7 @@
#include <cinttypes>
#include <cstdlib>
#include <cstring>
+#include <iomanip>
#include <sstream>
#include "src/base/functional.h"
@@ -40,6 +41,8 @@ namespace internal {
namespace {
+char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
+
struct Flag;
Flag* FindFlagByPointer(const void* ptr);
Flag* FindFlagByName(const char* name);
@@ -380,8 +383,6 @@ Flag flags[] = {
const size_t num_flags = sizeof(flags) / sizeof(*flags);
-inline char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
-
bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
@@ -429,7 +430,27 @@ static const char* Type2String(Flag::FlagType type) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, const Flag& flag) {
+// Helper struct for printing normalized Flag names.
+struct FlagName {
+ explicit FlagName(const Flag& flag) : flag(flag) {}
+ const Flag& flag;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagName& flag_name) {
+ for (const char* c = flag_name.flag.name(); *c != '\0'; ++c) {
+ os << NormalizeChar(*c);
+ }
+ return os;
+}
+
+// Helper for printing flag values.
+struct FlagValue {
+ explicit FlagValue(const Flag& flag) : flag(flag) {}
+ const Flag& flag;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagValue& flag_value) {
+ const Flag& flag = flag_value.flag;
switch (flag.type()) {
case Flag::TYPE_BOOL:
os << (flag.bool_variable() ? "true" : "false");
@@ -456,33 +477,20 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) {
break;
case Flag::TYPE_STRING: {
const char* str = flag.string_value();
- os << (str ? str : "nullptr");
+ os << std::quoted(str ? str : "");
break;
}
}
return os;
}
-// static
-std::vector<const char*>* FlagList::argv() {
- std::vector<const char*>* args = new std::vector<const char*>(8);
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- if (!f->IsDefault()) {
- {
- bool disabled = f->type() == Flag::TYPE_BOOL && !f->bool_variable();
- std::ostringstream os;
- os << (disabled ? "--no" : "--") << f->name();
- args->push_back(StrDup(os.str().c_str()));
- }
- if (f->type() != Flag::TYPE_BOOL) {
- std::ostringstream os;
- os << *f;
- args->push_back(StrDup(os.str().c_str()));
- }
- }
+std::ostream& operator<<(std::ostream& os, const Flag& flag) {
+ if (flag.type() == Flag::TYPE_BOOL) {
+ os << (flag.bool_variable() ? "--" : "--no") << FlagName(flag);
+ } else {
+ os << "--" << FlagName(flag) << "=" << FlagValue(flag);
}
- return args;
+ return os;
}
// Helper function to parse flags: Takes an argument arg and splits it into
@@ -768,16 +776,20 @@ void FlagList::PrintHelp() {
os << "Options:\n";
for (const Flag& f : flags) {
- os << " --";
- for (const char* c = f.name(); *c != '\0'; ++c) {
- os << NormalizeChar(*c);
- }
- os << " (" << f.comment() << ")\n"
+ os << " --" << FlagName(f) << " (" << f.comment() << ")\n"
<< " type: " << Type2String(f.type()) << " default: " << f
<< "\n";
}
}
+// static
+void FlagList::PrintValues() {
+ StdoutStream os;
+ for (const Flag& f : flags) {
+ os << f << "\n";
+ }
+}
+
namespace {
static uint32_t flag_hash = 0;
diff --git a/chromium/v8/src/flags/flags.h b/chromium/v8/src/flags/flags.h
index 753da04c2dc..07a29af5d46 100644
--- a/chromium/v8/src/flags/flags.h
+++ b/chromium/v8/src/flags/flags.h
@@ -19,15 +19,6 @@ namespace internal {
// The global list of all flags.
class V8_EXPORT_PRIVATE FlagList {
public:
- // The list of all flags with a value different from the default
- // and their values. The format of the list is like the format of the
- // argv array passed to the main function, e.g.
- // ("--prof", "--log-file", "v8.prof", "--nolazy").
- //
- // The caller is responsible for disposing the list, as well
- // as every element of it.
- static std::vector<const char*>* argv();
-
class HelpOptions {
public:
enum ExitBehavior : bool { kExit = true, kDontExit = false };
@@ -78,6 +69,8 @@ class V8_EXPORT_PRIVATE FlagList {
// Print help to stdout with flags, types, and default values.
static void PrintHelp();
+ static void PrintValues();
+
// Set flags as consequence of being implied by another flag.
static void EnforceFlagImplications();
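The flags.cc and flags.h hunks above replace FlagList::argv() with FlagName/FlagValue printing helpers and a new FlagList::PrintValues() entry point, exposed as --print-flag-values. A minimal sketch of the new call, with invented example output in the comments; boolean flags print as --name or --noname, all other types as --name=value, with underscores normalized to dashes and string values quoted via std::quoted:

    // Dump every flag's current value, mirroring what --print-flag-values does.
    v8::internal::FlagList::PrintValues();
    // Example output (values are illustrative only):
    //   --nolazy
    //   --interrupt-budget=135168
    //   --dump-wasm-module-path=""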
diff --git a/chromium/v8/src/handles/DIR_METADATA b/chromium/v8/src/handles/DIR_METADATA
index ff55846b318..af999da1f2a 100644
--- a/chromium/v8/src/handles/DIR_METADATA
+++ b/chromium/v8/src/handles/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-} \ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/chromium/v8/src/handles/global-handles-inl.h b/chromium/v8/src/handles/global-handles-inl.h
new file mode 100644
index 00000000000..1f86e2dcb4a
--- /dev/null
+++ b/chromium/v8/src/handles/global-handles-inl.h
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_GLOBAL_HANDLES_INL_H_
+#define V8_HANDLES_GLOBAL_HANDLES_INL_H_
+
+#include "src/handles/global-handles.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/heap-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+Handle<T> GlobalHandles::Create(T value) {
+ static_assert(std::is_base_of<Object, T>::value, "static type violation");
+ // The compiler should only pick this method if T is not Object.
+ static_assert(!std::is_same<Object, T>::value, "compiler error");
+ return Handle<T>::cast(Create(Object(value)));
+}
+
+template <typename T>
+T GlobalHandleVector<T>::Pop() {
+ T obj = T::cast(Object(locations_.back()));
+ locations_.pop_back();
+ return obj;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_GLOBAL_HANDLES_INL_H_
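With Create<T>() and GlobalHandleVector<T>::Pop() moved out of global-handles.h, callers that instantiate these templates now pull in the new -inl header. A hedged sketch of a call site inside v8::internal code (the isolate pointer and HeapObject value are assumed to be in scope):

    #include "src/handles/global-handles-inl.h"

    // The templated Create<T>() is only defined in the -inl header after this
    // change; `isolate` is an Isolate* and `object` a HeapObject (assumed).
    Handle<HeapObject> global = isolate->global_handles()->Create(object);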
diff --git a/chromium/v8/src/handles/global-handles.cc b/chromium/v8/src/handles/global-handles.cc
index 55230a6d0bb..d8d50166676 100644
--- a/chromium/v8/src/handles/global-handles.cc
+++ b/chromium/v8/src/handles/global-handles.cc
@@ -8,7 +8,7 @@
#include <cstdint>
#include <map>
-#include "include/v8.h"
+#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
diff --git a/chromium/v8/src/handles/global-handles.h b/chromium/v8/src/handles/global-handles.h
index 237cedbbb58..296b0704b2c 100644
--- a/chromium/v8/src/handles/global-handles.h
+++ b/chromium/v8/src/handles/global-handles.h
@@ -10,10 +10,12 @@
#include <utility>
#include <vector>
+#include "include/v8-callbacks.h"
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
-#include "include/v8.h"
#include "src/handles/handles.h"
#include "src/heap/heap.h"
+#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
#include "src/utils/utils.h"
@@ -100,12 +102,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
Handle<Object> Create(Address value);
template <typename T>
- Handle<T> Create(T value) {
- static_assert(std::is_base_of<Object, T>::value, "static type violation");
- // The compiler should only pick this method if T is not Object.
- static_assert(!std::is_same<Object, T>::value, "compiler error");
- return Handle<T>::cast(Create(Object(value)));
- }
+ inline Handle<T> Create(T value);
Handle<Object> CreateTraced(Object value, Address* slot, bool has_destructor,
bool is_on_stack);
@@ -357,11 +354,7 @@ class GlobalHandleVector {
void Push(T val) { locations_.push_back(val.ptr()); }
// Handles into the GlobalHandleVector become invalid when they are removed,
// so "pop" returns a raw object rather than a handle.
- T Pop() {
- T obj = T::cast(Object(locations_.back()));
- locations_.pop_back();
- return obj;
- }
+ inline T Pop();
Iterator begin() { return Iterator(locations_.begin()); }
Iterator end() { return Iterator(locations_.end()); }
diff --git a/chromium/v8/src/handles/handles.cc b/chromium/v8/src/handles/handles.cc
index 392b1f81538..2503121e978 100644
--- a/chromium/v8/src/handles/handles.cc
+++ b/chromium/v8/src/handles/handles.cc
@@ -43,7 +43,7 @@ bool HandleBase::IsDereferenceAllowed() const {
RootsTable::IsImmortalImmovable(root_index)) {
return true;
}
- if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
+ if (isolate->IsBuiltinTableHandleLocation(location_)) return true;
if (!AllowHandleDereference::IsAllowed()) return false;
LocalHeap* local_heap = isolate->CurrentLocalHeap();
diff --git a/chromium/v8/src/handles/handles.h b/chromium/v8/src/handles/handles.h
index 929cba0bc7b..166b7ee4ab6 100644
--- a/chromium/v8/src/handles/handles.h
+++ b/chromium/v8/src/handles/handles.h
@@ -7,7 +7,6 @@
#include <type_traits>
-#include "include/v8.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
#include "src/common/checks.h"
@@ -15,6 +14,9 @@
#include "src/zone/zone.h"
namespace v8 {
+
+class HandleScope;
+
namespace internal {
// Forward declarations.
diff --git a/chromium/v8/src/heap/DIR_METADATA b/chromium/v8/src/heap/DIR_METADATA
index ff55846b318..af999da1f2a 100644
--- a/chromium/v8/src/heap/DIR_METADATA
+++ b/chromium/v8/src/heap/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-} \ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.cc b/chromium/v8/src/heap/array-buffer-sweeper.cc
index 597e4d0f938..cdab2a9aabf 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.cc
+++ b/chromium/v8/src/heap/array-buffer-sweeper.cc
@@ -5,6 +5,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include <atomic>
+#include <memory>
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -24,7 +25,9 @@ void ArrayBufferList::Append(ArrayBufferExtension* extension) {
tail_ = extension;
}
- bytes_ += extension->accounting_length();
+ const size_t accounting_length = extension->accounting_length();
+ DCHECK_GE(bytes_ + accounting_length, bytes_);
+ bytes_ += accounting_length;
extension->set_next(nullptr);
}
@@ -41,113 +44,119 @@ void ArrayBufferList::Append(ArrayBufferList* list) {
DCHECK_NULL(list->tail_);
}
- bytes_ += list->Bytes();
- list->Reset();
+ bytes_ += list->ApproximateBytes();
+ *list = ArrayBufferList();
}
-bool ArrayBufferList::Contains(ArrayBufferExtension* extension) {
- ArrayBufferExtension* current = head_;
-
- while (current) {
+bool ArrayBufferList::ContainsSlow(ArrayBufferExtension* extension) const {
+ for (ArrayBufferExtension* current = head_; current;
+ current = current->next()) {
if (current == extension) return true;
- current = current->next();
}
-
return false;
}
-size_t ArrayBufferList::BytesSlow() {
+size_t ArrayBufferList::BytesSlow() const {
ArrayBufferExtension* current = head_;
size_t sum = 0;
-
while (current) {
sum += current->accounting_length();
current = current->next();
}
-
+ DCHECK_GE(sum, ApproximateBytes());
return sum;
}
+bool ArrayBufferList::IsEmpty() const {
+ DCHECK_IMPLIES(head_, tail_);
+ DCHECK_IMPLIES(!head_, bytes_ == 0);
+ return head_ == nullptr;
+}
+
+struct ArrayBufferSweeper::SweepingJob final {
+ SweepingJob(ArrayBufferList young, ArrayBufferList old, SweepingType type)
+ : state_(SweepingState::kInProgress),
+ young_(std::move(young)),
+ old_(std::move(old)),
+ type_(type) {}
+
+ void Sweep();
+ void SweepYoung();
+ void SweepFull();
+ ArrayBufferList SweepListFull(ArrayBufferList* list);
+
+ private:
+ CancelableTaskManager::Id id_ = CancelableTaskManager::kInvalidTaskId;
+ std::atomic<SweepingState> state_;
+ ArrayBufferList young_;
+ ArrayBufferList old_;
+ const SweepingType type_;
+ std::atomic<size_t> freed_bytes_{0};
+
+ friend class ArrayBufferSweeper;
+};
+
+ArrayBufferSweeper::ArrayBufferSweeper(Heap* heap) : heap_(heap) {}
+
+ArrayBufferSweeper::~ArrayBufferSweeper() {
+ EnsureFinished();
+ ReleaseAll(&old_);
+ ReleaseAll(&young_);
+}
+
void ArrayBufferSweeper::EnsureFinished() {
- if (!sweeping_in_progress_) return;
+ if (!sweeping_in_progress()) return;
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
switch (abort_result) {
- case TryAbortResult::kTaskAborted: {
+ case TryAbortResult::kTaskAborted:
+ // Task has not run, so we need to run it synchronously here.
job_->Sweep();
- Merge();
break;
- }
-
- case TryAbortResult::kTaskRemoved: {
- if (job_->state_ == SweepingState::kInProgress) job_->Sweep();
- if (job_->state_ == SweepingState::kDone) Merge();
+ case TryAbortResult::kTaskRemoved:
+      // Task was removed, but it did actually run; just ensure we are in the
+      // right state.
+ CHECK_EQ(SweepingState::kDone, job_->state_);
break;
- }
-
case TryAbortResult::kTaskRunning: {
+ // Task is running. Wait until task is finished with its work.
base::MutexGuard guard(&sweeping_mutex_);
- // Wait until task is finished with its work.
while (job_->state_ != SweepingState::kDone) {
job_finished_.Wait(&sweeping_mutex_);
}
- Merge();
break;
}
-
- default:
- UNREACHABLE();
}
- UpdateCountersForConcurrentlySweptExtensions();
+ Finalize();
DCHECK_LE(heap_->backing_store_bytes(), SIZE_MAX);
- sweeping_in_progress_ = false;
+ DCHECK(!sweeping_in_progress());
}
-void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
- if (sweeping_in_progress_) {
- DCHECK(job_.has_value());
+void ArrayBufferSweeper::FinishIfDone() {
+ if (sweeping_in_progress()) {
+ DCHECK(job_);
if (job_->state_ == SweepingState::kDone) {
- Merge();
- sweeping_in_progress_ = false;
- } else {
- UpdateCountersForConcurrentlySweptExtensions();
+ Finalize();
}
}
}
-void ArrayBufferSweeper::UpdateCountersForConcurrentlySweptExtensions() {
- size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
- DecrementExternalMemoryCounters(freed_bytes);
-}
-
-void ArrayBufferSweeper::RequestSweepYoung() {
- RequestSweep(SweepingScope::kYoung);
-}
-
-void ArrayBufferSweeper::RequestSweepFull() {
- RequestSweep(SweepingScope::kFull);
-}
-
-size_t ArrayBufferSweeper::YoungBytes() { return young_bytes_; }
+void ArrayBufferSweeper::RequestSweep(SweepingType type) {
+ DCHECK(!sweeping_in_progress());
-size_t ArrayBufferSweeper::OldBytes() { return old_bytes_; }
-
-void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
- DCHECK(!sweeping_in_progress_);
-
- if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::kYoung))
+ if (young_.IsEmpty() && (old_.IsEmpty() || type == SweepingType::kYoung))
return;
+ Prepare(type);
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
FLAG_concurrent_array_buffer_sweeping) {
- Prepare(scope);
-
- auto task = MakeCancelableTask(heap_->isolate(), [this, scope] {
+ auto task = MakeCancelableTask(heap_->isolate(), [this, type] {
GCTracer::Scope::ScopeId scope_id =
- scope == SweepingScope::kYoung
+ type == SweepingType::kYoung
? GCTracer::Scope::BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
: GCTracer::Scope::BACKGROUND_FULL_ARRAY_BUFFER_SWEEP;
TRACE_GC_EPOCH(heap_->tracer(), scope_id, ThreadKind::kBackground);
@@ -157,74 +166,64 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
});
job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- sweeping_in_progress_ = true;
} else {
- Prepare(scope);
job_->Sweep();
- Merge();
- UpdateCountersForConcurrentlySweptExtensions();
+ Finalize();
}
}
-void ArrayBufferSweeper::Prepare(SweepingScope scope) {
- DCHECK(!job_.has_value());
-
- if (scope == SweepingScope::kYoung) {
- job_.emplace(this, young_, ArrayBufferList(), SweepingScope::kYoung);
- young_.Reset();
- young_bytes_ = 0;
- } else {
- CHECK_EQ(scope, SweepingScope::kFull);
- job_.emplace(this, young_, old_, SweepingScope::kFull);
- young_.Reset();
- old_.Reset();
- young_bytes_ = old_bytes_ = 0;
+void ArrayBufferSweeper::Prepare(SweepingType type) {
+ DCHECK(!sweeping_in_progress());
+ switch (type) {
+ case SweepingType::kYoung: {
+ job_ = std::make_unique<SweepingJob>(std::move(young_), ArrayBufferList(),
+ type);
+ young_ = ArrayBufferList();
+ } break;
+ case SweepingType::kFull: {
+ job_ = std::make_unique<SweepingJob>(std::move(young_), std::move(old_),
+ type);
+ young_ = ArrayBufferList();
+ old_ = ArrayBufferList();
+ } break;
}
+ DCHECK(sweeping_in_progress());
}
-void ArrayBufferSweeper::Merge() {
- DCHECK(job_.has_value());
+void ArrayBufferSweeper::Finalize() {
+ DCHECK(sweeping_in_progress());
CHECK_EQ(job_->state_, SweepingState::kDone);
young_.Append(&job_->young_);
old_.Append(&job_->old_);
- young_bytes_ = young_.Bytes();
- old_bytes_ = old_.Bytes();
-
+ const size_t freed_bytes =
+ job_->freed_bytes_.exchange(0, std::memory_order_relaxed);
+ DecrementExternalMemoryCounters(freed_bytes);
job_.reset();
-}
-
-void ArrayBufferSweeper::ReleaseAll() {
- EnsureFinished();
- ReleaseAll(&old_);
- ReleaseAll(&young_);
- old_bytes_ = young_bytes_ = 0;
+ DCHECK(!sweeping_in_progress());
}
void ArrayBufferSweeper::ReleaseAll(ArrayBufferList* list) {
ArrayBufferExtension* current = list->head_;
-
while (current) {
ArrayBufferExtension* next = current->next();
delete current;
current = next;
}
-
- list->Reset();
+ *list = ArrayBufferList();
}
void ArrayBufferSweeper::Append(JSArrayBuffer object,
ArrayBufferExtension* extension) {
size_t bytes = extension->accounting_length();
+ FinishIfDone();
+
if (Heap::InYoungGeneration(object)) {
young_.Append(extension);
- young_bytes_ += bytes;
} else {
old_.Append(extension);
- old_bytes_ += bytes;
}
- MergeBackExtensionsWhenSwept();
IncrementExternalMemoryCounters(bytes);
}
@@ -235,21 +234,21 @@ void ArrayBufferSweeper::Detach(JSArrayBuffer object,
// We cannot free the extension eagerly here, since extensions are tracked in
// a singly linked list. The next GC will remove it automatically.
- if (!sweeping_in_progress_) {
+ FinishIfDone();
+
+ if (!sweeping_in_progress()) {
// If concurrent sweeping isn't running at the moment, we can also adjust
- // young_bytes_ or old_bytes_ right away.
+    // the respective bytes in the corresponding ArrayBufferLists as they are
+ // only approximate.
if (Heap::InYoungGeneration(object)) {
- DCHECK_GE(young_bytes_, bytes);
- young_bytes_ -= bytes;
+ DCHECK_GE(young_.bytes_, bytes);
young_.bytes_ -= bytes;
} else {
- DCHECK_GE(old_bytes_, bytes);
- old_bytes_ -= bytes;
+ DCHECK_GE(old_.bytes_, bytes);
old_.bytes_ -= bytes;
}
}
- MergeBackExtensionsWhenSwept();
DecrementExternalMemoryCounters(bytes);
}
@@ -270,29 +269,25 @@ void ArrayBufferSweeper::DecrementExternalMemoryCounters(size_t bytes) {
heap_->update_external_memory(-static_cast<int64_t>(bytes));
}
-void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
- if (bytes == 0) return;
- freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
-}
-
void ArrayBufferSweeper::SweepingJob::Sweep() {
CHECK_EQ(state_, SweepingState::kInProgress);
-
- if (scope_ == SweepingScope::kYoung) {
- SweepYoung();
- } else {
- CHECK_EQ(scope_, SweepingScope::kFull);
- SweepFull();
+ switch (type_) {
+ case SweepingType::kYoung:
+ SweepYoung();
+ break;
+ case SweepingType::kFull:
+ SweepFull();
+ break;
}
state_ = SweepingState::kDone;
}
void ArrayBufferSweeper::SweepingJob::SweepFull() {
- CHECK_EQ(scope_, SweepingScope::kFull);
+ DCHECK_EQ(SweepingType::kFull, type_);
ArrayBufferList promoted = SweepListFull(&young_);
ArrayBufferList survived = SweepListFull(&old_);
- old_ = promoted;
+ old_ = std::move(promoted);
old_.Append(&survived);
}
@@ -305,9 +300,9 @@ ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
ArrayBufferExtension* next = current->next();
if (!current->IsMarked()) {
- size_t bytes = current->accounting_length();
+ const size_t bytes = current->accounting_length();
delete current;
- sweeper_->IncrementFreedBytes(bytes);
+ if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
} else {
current->Unmark();
survivor_list.Append(current);
@@ -316,12 +311,12 @@ ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
current = next;
}
- list->Reset();
+ *list = ArrayBufferList();
return survivor_list;
}
void ArrayBufferSweeper::SweepingJob::SweepYoung() {
- CHECK_EQ(scope_, SweepingScope::kYoung);
+ DCHECK_EQ(SweepingType::kYoung, type_);
ArrayBufferExtension* current = young_.head_;
ArrayBufferList new_young;
@@ -333,7 +328,7 @@ void ArrayBufferSweeper::SweepingJob::SweepYoung() {
if (!current->IsYoungMarked()) {
size_t bytes = current->accounting_length();
delete current;
- sweeper_->IncrementFreedBytes(bytes);
+ if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
} else if (current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.h b/chromium/v8/src/heap/array-buffer-sweeper.h
index 6dd7ed97f6c..14360dd67f2 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.h
+++ b/chromium/v8/src/heap/array-buffer-sweeper.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_ARRAY_BUFFER_SWEEPER_H_
#define V8_HEAP_ARRAY_BUFFER_SWEEPER_H_
+#include <memory>
+
+#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
#include "src/objects/js-array-buffer.h"
#include "src/tasks/cancelable-task.h"
@@ -17,47 +20,38 @@ class Heap;
// Singly linked-list of ArrayBufferExtensions that stores head and tail of the
// list to allow for concatenation of lists.
-struct ArrayBufferList {
- ArrayBufferList() : head_(nullptr), tail_(nullptr), bytes_(0) {}
-
- ArrayBufferExtension* head_;
- ArrayBufferExtension* tail_;
- size_t bytes_;
-
- bool IsEmpty() {
- DCHECK_IMPLIES(head_, tail_);
- return head_ == nullptr;
- }
-
- size_t Bytes() { return bytes_; }
- size_t BytesSlow();
-
- void Reset() {
- head_ = tail_ = nullptr;
- bytes_ = 0;
- }
+struct ArrayBufferList final {
+ bool IsEmpty() const;
+ size_t ApproximateBytes() const { return bytes_; }
+ size_t BytesSlow() const;
void Append(ArrayBufferExtension* extension);
void Append(ArrayBufferList* list);
- V8_EXPORT_PRIVATE bool Contains(ArrayBufferExtension* extension);
+ V8_EXPORT_PRIVATE bool ContainsSlow(ArrayBufferExtension* extension) const;
+
+ private:
+ ArrayBufferExtension* head_ = nullptr;
+ ArrayBufferExtension* tail_ = nullptr;
+ // Bytes are approximate as they may be subtracted eagerly, while the
+ // `ArrayBufferExtension` is still in the list. The extension will only be
+ // dropped on next sweep.
+ size_t bytes_ = 0;
+
+ friend class ArrayBufferSweeper;
};
// The ArrayBufferSweeper iterates and deletes ArrayBufferExtensions
// concurrently to the application.
-class ArrayBufferSweeper {
+class ArrayBufferSweeper final {
public:
- explicit ArrayBufferSweeper(Heap* heap)
- : heap_(heap),
- sweeping_in_progress_(false),
- freed_bytes_(0),
- young_bytes_(0),
- old_bytes_(0) {}
- ~ArrayBufferSweeper() { ReleaseAll(); }
+ enum class SweepingType { kYoung, kFull };
+ explicit ArrayBufferSweeper(Heap* heap);
+ ~ArrayBufferSweeper();
+
+ void RequestSweep(SweepingType sweeping_type);
void EnsureFinished();
- void RequestSweepYoung();
- void RequestSweepFull();
// Track the given ArrayBufferExtension for the given JSArrayBuffer.
void Append(JSArrayBuffer object, ArrayBufferExtension* extension);
@@ -65,70 +59,40 @@ class ArrayBufferSweeper {
// Detaches an ArrayBufferExtension from a JSArrayBuffer.
void Detach(JSArrayBuffer object, ArrayBufferExtension* extension);
- ArrayBufferList young() { return young_; }
- ArrayBufferList old() { return old_; }
+ const ArrayBufferList& young() const { return young_; }
+ const ArrayBufferList& old() const { return old_; }
- size_t YoungBytes();
- size_t OldBytes();
+ // Bytes accounted in the young generation. Rebuilt during sweeping.
+ size_t YoungBytes() const { return young().ApproximateBytes(); }
+ // Bytes accounted in the old generation. Rebuilt during sweeping.
+ size_t OldBytes() const { return old().ApproximateBytes(); }
private:
- enum class SweepingScope { kYoung, kFull };
+ struct SweepingJob;
enum class SweepingState { kInProgress, kDone };
- struct SweepingJob {
- ArrayBufferSweeper* sweeper_;
- CancelableTaskManager::Id id_;
- std::atomic<SweepingState> state_;
- ArrayBufferList young_;
- ArrayBufferList old_;
- SweepingScope scope_;
-
- SweepingJob(ArrayBufferSweeper* sweeper, ArrayBufferList young,
- ArrayBufferList old, SweepingScope scope)
- : sweeper_(sweeper),
- id_(0),
- state_(SweepingState::kInProgress),
- young_(young),
- old_(old),
- scope_(scope) {}
-
- void Sweep();
- void SweepYoung();
- void SweepFull();
- ArrayBufferList SweepListFull(ArrayBufferList* list);
- };
-
- base::Optional<SweepingJob> job_;
-
- void Merge();
- void MergeBackExtensionsWhenSwept();
-
- void UpdateCountersForConcurrentlySweptExtensions();
+ bool sweeping_in_progress() const { return job_.get(); }
+
+ // Finishes sweeping if it is already done.
+ void FinishIfDone();
+
+ // Increments external memory counters outside of ArrayBufferSweeper.
+ // Increment may trigger GC.
void IncrementExternalMemoryCounters(size_t bytes);
void DecrementExternalMemoryCounters(size_t bytes);
- void IncrementFreedBytes(size_t bytes);
- void RequestSweep(SweepingScope sweeping_task);
- void Prepare(SweepingScope sweeping_task);
+ void Prepare(SweepingType type);
+ void Finalize();
- ArrayBufferList SweepYoungGen();
- void SweepOldGen(ArrayBufferExtension* extension);
-
- void ReleaseAll();
void ReleaseAll(ArrayBufferList* extension);
Heap* const heap_;
- bool sweeping_in_progress_;
+ std::unique_ptr<SweepingJob> job_;
base::Mutex sweeping_mutex_;
base::ConditionVariable job_finished_;
- std::atomic<size_t> freed_bytes_;
-
ArrayBufferList young_;
ArrayBufferList old_;
-
- size_t young_bytes_;
- size_t old_bytes_;
};
} // namespace internal
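The ArrayBufferSweeper refactoring above folds RequestSweepYoung()/RequestSweepFull() into a single RequestSweep(SweepingType) and replaces the optional job with a unique_ptr owned sweeping job. A hedged sketch of the new call sites inside the GC (the heap pointer is assumed; the real callers live in the mark-compact and scavenger code):

    // Young-generation sweep, previously RequestSweepYoung().
    heap->array_buffer_sweeper()->RequestSweep(
        ArrayBufferSweeper::SweepingType::kYoung);

    // Full sweep, previously RequestSweepFull(); byte counts are rebuilt during
    // sweeping and can be read approximately via YoungBytes()/OldBytes().
    heap->array_buffer_sweeper()->RequestSweep(
        ArrayBufferSweeper::SweepingType::kFull);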
diff --git a/chromium/v8/src/heap/base/asm/loong64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/loong64/push_registers_asm.cc
new file mode 100644
index 00000000000..aa8dcd356bf
--- /dev/null
+++ b/chromium/v8/src/heap/base/asm/loong64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".text \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi.d $sp, $sp, -96 \n"
+ " st.d $ra, $sp, 88 \n"
+ " st.d $s8, $sp, 80 \n"
+ " st.d $sp, $sp, 72 \n"
+ " st.d $fp, $sp, 64 \n"
+ " st.d $s7, $sp, 56 \n"
+ " st.d $s6, $sp, 48 \n"
+ " st.d $s5, $sp, 40 \n"
+ " st.d $s4, $sp, 32 \n"
+ " st.d $s3, $sp, 24 \n"
+ " st.d $s2, $sp, 16 \n"
+ " st.d $s1, $sp, 8 \n"
+ " st.d $s0, $sp, 0 \n"
+ // Maintain frame pointer.
+ " addi.d $s8, $sp, 0 \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " addi.d $a3, $a2, 0 \n"
+ // Call the callback.
+ // Pass 3rd parameter as sp (stack pointer).
+ " addi.d $a2, $sp, 0 \n"
+ " jirl $ra, $a3, 0 \n"
+ // Load return address.
+ " ld.d $ra, $sp, 88 \n"
+ // Restore frame pointer.
+ " ld.d $s8, $sp, 80 \n"
+ // Discard all callee-saved registers.
+ " addi.d $sp, $sp, 96 \n"
+ " jirl $zero, $ra, 0 \n");
diff --git a/chromium/v8/src/heap/base/stack.cc b/chromium/v8/src/heap/base/stack.cc
index fd5eab4528a..8b6713e6876 100644
--- a/chromium/v8/src/heap/base/stack.cc
+++ b/chromium/v8/src/heap/base/stack.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
+#include "src/base/sanitizer/tsan.h"
#include "src/heap/cppgc/globals.h"
namespace heap {
@@ -43,6 +44,10 @@ namespace {
// No ASAN support as accessing fake frames otherwise results in
// "stack-use-after-scope" warnings.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
void* asan_fake_stack,
const void* stack_start,
@@ -103,6 +108,10 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -133,6 +142,7 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
+ // TODO(chromium:1056170): Add support for SIMD and/or filtering.
IterateSafeStackIfNecessary(visitor);
}
diff --git a/chromium/v8/src/heap/basic-memory-chunk.cc b/chromium/v8/src/heap/basic-memory-chunk.cc
index 6fb0467c39f..0c7a8170cfa 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.cc
+++ b/chromium/v8/src/heap/basic-memory-chunk.cc
@@ -25,6 +25,26 @@ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
heap_internals::MemoryChunk::kHeapOffset);
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kAllFlagsMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersToHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersFromHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kEvacuationCandidateMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kIsInYoungGenerationMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
+
BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
Address area_end) {
size_ = size;
@@ -75,13 +95,11 @@ class BasicMemoryChunkValidator {
STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
offsetof(BasicMemoryChunk, size_));
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
- offsetof(BasicMemoryChunk, flags_));
+ offsetof(BasicMemoryChunk, main_thread_flags_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(offsetof(BasicMemoryChunk, size_) ==
MemoryChunkLayout::kSizeOffset);
- STATIC_ASSERT(offsetof(BasicMemoryChunk, flags_) ==
- MemoryChunkLayout::kFlagsOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, heap_) ==
MemoryChunkLayout::kHeapOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_start_) ==
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 993291dc0e0..de91e6ea9fc 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -9,6 +9,7 @@
#include <unordered_map>
#include "src/base/atomic-utils.h"
+#include "src/base/flags.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
@@ -30,7 +31,7 @@ class BasicMemoryChunk {
}
};
- enum Flag {
+ enum Flag : uintptr_t {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
@@ -44,12 +45,6 @@ class BasicMemoryChunk {
EVACUATION_CANDIDATE = 1u << 6,
NEVER_EVACUATE = 1u << 7,
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION = 1u << 9,
@@ -111,6 +106,28 @@ class BasicMemoryChunk {
IN_SHARED_HEAP = 1u << 23,
};
+ using MainThreadFlags = base::Flags<Flag, uintptr_t>;
+
+ static constexpr MainThreadFlags kAllFlagsMask = ~MainThreadFlags(NO_FLAGS);
+
+ static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kEvacuationCandidateMask =
+ EVACUATION_CANDIDATE;
+
+ static constexpr MainThreadFlags kIsInYoungGenerationMask =
+ MainThreadFlags(FROM_PAGE) | MainThreadFlags(TO_PAGE);
+
+ static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;
+
+ static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
+ MainThreadFlags(kEvacuationCandidateMask) |
+ MainThreadFlags(kIsInYoungGenerationMask);
+
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -157,54 +174,20 @@ class BasicMemoryChunk {
void set_owner(BaseSpace* space) { owner_ = space; }
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) const {
- return (GetFlags<access_mode>() & flag) != 0;
+ void SetFlag(Flag flag) { main_thread_flags_ |= flag; }
+ bool IsFlagSet(Flag flag) const { return main_thread_flags_ & flag; }
+ void ClearFlag(Flag flag) {
+ main_thread_flags_ = main_thread_flags_.without(flag);
}
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
-
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
+ void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
+  // Set or clear multiple flags at a time. `mask` indicates which flags
+  // should be replaced with new `flags`.
+ void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
+ main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() const {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
+ MainThreadFlags GetFlags() const { return main_thread_flags_; }
private:
bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }
@@ -227,16 +210,13 @@ class BasicMemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ bool IsEvacuationCandidate() const {
+ DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+ return IsFlagSet(EVACUATION_CANDIDATE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
+ bool ShouldSkipEvacuationSlotRecording() const {
+ MainThreadFlags flags = GetFlags();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
@@ -360,7 +340,9 @@ class BasicMemoryChunk {
// Overall size of the chunk, including the header and guards.
size_t size_;
- uintptr_t flags_ = NO_FLAGS;
+ // Flags that are only mutable from the main thread when no concurrent
+ // component (e.g. marker, sweeper) is running.
+ MainThreadFlags main_thread_flags_{NO_FLAGS};
// TODO(v8:7464): Find a way to remove this.
// This goes against the spirit for the BasicMemoryChunk, but until C++14/17
@@ -399,6 +381,8 @@ class BasicMemoryChunk {
friend class PagedSpace;
};
+DEFINE_OPERATORS_FOR_FLAGS(BasicMemoryChunk::MainThreadFlags)
+
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
} // namespace internal
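
The hunk above replaces the raw uintptr_t `flags_` with a typed `MainThreadFlags` set and wires up its operators via `DEFINE_OPERATORS_FOR_FLAGS`. The masked-update semantics of `SetFlags(flags, mask)` are easy to get wrong, so here is a minimal, self-contained sketch of just that arithmetic; it uses a plain `uint32_t` stand-in rather than V8's real `base::Flags` type, and the flag names are only illustrative:

    #include <cassert>
    #include <cstdint>

    // Plain-integer stand-in for the typed flag set; the real type is built on
    // v8::base::Flags via DEFINE_OPERATORS_FOR_FLAGS.
    using MainThreadFlags = uint32_t;

    constexpr MainThreadFlags NO_FLAGS = 0;
    constexpr MainThreadFlags EVACUATION_CANDIDATE = 1u << 0;
    constexpr MainThreadFlags FROM_PAGE = 1u << 1;
    constexpr MainThreadFlags TO_PAGE = 1u << 2;

    // Masked update as in BasicMemoryChunk::SetFlags(flags, mask): bits selected
    // by `mask` take their value from `flags`; all other bits keep their value.
    constexpr MainThreadFlags SetFlags(MainThreadFlags current,
                                       MainThreadFlags flags,
                                       MainThreadFlags mask) {
      return (current & ~mask) | (flags & mask);
    }

    int main() {
      MainThreadFlags chunk = EVACUATION_CANDIDATE | FROM_PAGE;
      // Clear the young-generation bits while leaving the evacuation bit alone.
      chunk = SetFlags(chunk, NO_FLAGS, FROM_PAGE | TO_PAGE);
      assert(chunk == EVACUATION_CANDIDATE);
      return 0;
    }
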
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index 0dfe024db99..eba77baf77e 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -31,6 +31,7 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"
@@ -87,11 +88,13 @@ class ConcurrentMarkingVisitor final
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool embedder_tracing_enabled, bool is_forced_gc,
+ bool embedder_tracing_enabled,
+ bool should_keep_ages_unchanged,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, code_flush_mode,
- embedder_tracing_enabled, is_forced_gc),
+ embedder_tracing_enabled,
+ should_keep_ages_unchanged),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
@@ -168,28 +171,28 @@ class ConcurrentMarkingVisitor final
private:
// Helper class for collecting in-object slot addresses and values.
- class SlotSnapshottingVisitor final : public ObjectVisitor {
+ class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
public:
- explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
- : slot_snapshot_(slot_snapshot) {
+ explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot,
+ PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base)
+ : ObjectVisitorWithCageBases(cage_base, code_cage_base),
+ slot_snapshot_(slot_snapshot) {
slot_snapshot_->clear();
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
- Object object = p.Relaxed_Load(cage_base);
+ Object object = p.Relaxed_Load(cage_base());
slot_snapshot_->add(p, object);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object code = slot.Relaxed_Load(code_cage_base);
- slot_snapshot_->add(slot, code);
+ Object code = slot.Relaxed_Load(code_cage_base());
+ slot_snapshot_->add(ObjectSlot(slot.address()), code);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -280,7 +283,8 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
- SlotSnapshottingVisitor visitor(&slot_snapshot_);
+ SlotSnapshottingVisitor visitor(&slot_snapshot_, cage_base(),
+ code_cage_base());
visitor.VisitPointer(object, object.map_slot());
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
@@ -368,11 +372,12 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
- base::EnumSet<CodeFlushMode> code_flush_mode, bool is_forced_gc)
+ base::EnumSet<CodeFlushMode> code_flush_mode,
+ bool should_keep_ages_unchanged)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
- is_forced_gc_(is_forced_gc) {}
+ should_keep_ages_unchanged_(should_keep_ages_unchanged) {}
~JobTask() override = default;
JobTask(const JobTask&) = delete;
@@ -383,13 +388,13 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
if (delegate->IsJoiningThread()) {
// TRACE_GC is not needed here because the caller opens the right scope.
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
- is_forced_gc_);
+ should_keep_ages_unchanged_);
} else {
TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
GCTracer::Scope::MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
- is_forced_gc_);
+ should_keep_ages_unchanged_);
}
}
@@ -401,7 +406,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
base::EnumSet<CodeFlushMode> code_flush_mode_;
- const bool is_forced_gc_;
+ const bool should_keep_ages_unchanged_;
};
ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -422,7 +427,8 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
void ConcurrentMarking::Run(JobDelegate* delegate,
base::EnumSet<CodeFlushMode> code_flush_mode,
- unsigned mark_compact_epoch, bool is_forced_gc) {
+ unsigned mark_compact_epoch,
+ bool should_keep_ages_unchanged) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
@@ -431,7 +437,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
mark_compact_epoch, code_flush_mode,
- heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
+ heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
@@ -575,7 +581,7 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
priority, std::make_unique<JobTask>(
this, heap_->mark_compact_collector()->epoch(),
heap_->mark_compact_collector()->code_flush_mode(),
- heap_->is_current_gc_forced()));
+ heap_->ShouldCurrentGCKeepAgesUnchanged()));
DCHECK(job_handle_->IsValid());
}
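
The SlotSnapshottingVisitor change above passes the pointer-compression cage bases in once through ObjectVisitorWithCageBases instead of recomputing GetPtrComprCageBase(host) for every slot. The role of a cage base is easiest to see in a reduced form; the sketch below uses a simple base-plus-32-bit-offset scheme for illustration only (V8's actual encoding also involves shifting and tagging):

    #include <cassert>
    #include <cstdint>

    // Illustrative compressed-pointer helpers; not V8's real encoding.
    using Address = uint64_t;
    using Tagged_t = uint32_t;  // compressed on-heap representation

    Tagged_t Compress(Address cage_base, Address full) {
      return static_cast<Tagged_t>(full - cage_base);
    }

    Address Decompress(Address cage_base, Tagged_t compressed) {
      // Every slot in the cage decompresses against the same base, which is why
      // a visitor can cache it instead of deriving it from each host object.
      return cage_base + compressed;
    }

    int main() {
      const Address cage_base = 0x100000000;  // assumed 4 GiB-aligned cage start
      const Address object = cage_base + 0x5f30;
      const Tagged_t slot_value = Compress(cage_base, object);
      assert(Decompress(cage_base, slot_value) == object);
      return 0;
    }
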
diff --git a/chromium/v8/src/heap/concurrent-marking.h b/chromium/v8/src/heap/concurrent-marking.h
index 87d39ccdeb5..12ee70da562 100644
--- a/chromium/v8/src/heap/concurrent-marking.h
+++ b/chromium/v8/src/heap/concurrent-marking.h
@@ -108,7 +108,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
};
class JobTask;
void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
- unsigned mark_compact_epoch, bool is_forced_gc);
+ unsigned mark_compact_epoch, bool should_keep_ages_unchanged);
size_t GetMaxConcurrency(size_t worker_count);
std::unique_ptr<JobHandle> job_handle_;
diff --git a/chromium/v8/src/heap/cppgc-js/DEPS b/chromium/v8/src/heap/cppgc-js/DEPS
new file mode 100644
index 00000000000..37049928d5b
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc-js/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
index 8c5813867fe..5e90039f40c 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,8 +10,8 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -217,6 +217,14 @@ void UnifiedHeapMarker::AddObject(void* object) {
cppgc::internal::HeapObjectHeader::FromObject(object));
}
+void FatalOutOfMemoryHandlerImpl(const std::string& reason,
+ const SourceLocation&, HeapBase* heap) {
+ FatalProcessOutOfMemory(
+ reinterpret_cast<v8::internal::Isolate*>(
+ static_cast<v8::internal::CppHeap*>(heap)->isolate()),
+ reason.c_str());
+}
+
} // namespace
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -246,6 +254,7 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
if (incremental_mark_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_mark_batched_events_),
GetContextId());
+ incremental_mark_batched_events_ = {};
}
}
@@ -264,6 +273,7 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
if (incremental_sweep_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_sweep_batched_events_),
GetContextId());
+ incremental_sweep_batched_events_ = {};
}
}
@@ -274,10 +284,12 @@ void CppHeap::MetricRecorderAdapter::FlushBatchedIncrementalEvents() {
if (!incremental_mark_batched_events_.events.empty()) {
recorder->AddMainThreadEvent(std::move(incremental_mark_batched_events_),
GetContextId());
+ incremental_mark_batched_events_ = {};
}
if (!incremental_sweep_batched_events_.events.empty()) {
recorder->AddMainThreadEvent(std::move(incremental_sweep_batched_events_),
GetContextId());
+ incremental_sweep_batched_events_ = {};
}
}
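
The repeated `incremental_*_batched_events_ = {};` assignments added in the hunks above make the moved-from batch explicitly empty again: after `std::move`, the struct is only guaranteed to be in a valid state, not a cleared one. A small sketch of the pattern, with a made-up `Batch` type standing in for the batched metrics structs:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Stand-in for the batched incremental-event structs; names are illustrative.
    struct Batch {
      std::vector<int> events;
    };

    void Submit(Batch batch) {  // stand-in for recorder->AddMainThreadEvent(...)
      std::printf("submitting %zu events\n", batch.events.size());
    }

    void Flush(Batch& pending) {
      if (pending.events.empty()) return;
      Submit(std::move(pending));
      // A moved-from object is only guaranteed to be valid, not empty;
      // reassigning a default-constructed value makes it reliably empty before
      // the next events are appended.
      pending = {};
    }

    int main() {
      Batch batch;
      batch.events = {1, 2, 3};
      Flush(batch);
      return 0;
    }
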
@@ -355,6 +367,7 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
wrapper_descriptor_);
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
SetStackStart(base::Stack::GetStackStart());
+ oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
no_gc_scope_--;
}
@@ -376,6 +389,7 @@ void CppHeap::DetachIsolate() {
isolate_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
isolate()->SetEmbedderHeapTracer(nullptr);
+ oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
no_gc_scope_++;
}
@@ -483,13 +497,14 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
// The allocated bytes counter in v8 was reset to the current marked bytes, so
// any pending allocated bytes updates should be discarded.
buffered_allocated_bytes_ = 0;
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
-#endif
+ verifier.Run(
+ stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+ USE(bytes_allocated_in_prefinalizers);
{
cppgc::subtle::NoGarbageCollectionScope no_gc(*this);
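
AttachIsolate()/DetachIsolate() above now install and remove a custom out-of-memory handler so that allocation failures in the C++ heap are reported against the owning isolate. A rough sketch of that attach/detach symmetry, with an invented OomHandler type (the real FatalOutOfMemoryHandler additionally receives a SourceLocation and a HeapBase*):

    #include <cstdlib>
    #include <string>

    // Invented plumbing for illustration; not cppgc's actual interface.
    class OomHandler {
     public:
      using Callback = void (*)(const std::string& reason);
      void SetCustomHandler(Callback callback) { custom_ = callback; }
      [[noreturn]] void Fail(const std::string& reason) const {
        if (custom_) custom_(reason);  // e.g. report via the embedder's isolate
        std::abort();
      }

     private:
      Callback custom_ = nullptr;
    };

    void ReportThroughIsolate(const std::string&) { /* isolate-aware reporting */ }

    void Attach(OomHandler& handler) {
      handler.SetCustomHandler(&ReportThroughIsolate);
    }
    void Detach(OomHandler& handler) { handler.SetCustomHandler(nullptr); }

    int main() {
      OomHandler handler;
      Attach(handler);  // mirrors oom_handler().SetCustomHandler(&...Impl)
      Detach(handler);  // mirrors oom_handler().SetCustomHandler(nullptr)
      return 0;
    }
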
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.h b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
index 8e4c047d1cf..a2d11bcd39a 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
@@ -10,8 +10,10 @@ static_assert(
false, "V8 targets can not be built with cppgc_is_standalone set to true.");
#endif
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-metrics.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
index dc55753ff62..9b20b5c0a78 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -264,6 +264,10 @@ class State final : public StateBase {
ephemeron_edges_.insert(&value);
}
+ void AddEagerEphemeronEdge(const void* value, cppgc::TraceCallback callback) {
+ eager_ephemeron_edges_.insert({value, callback});
+ }
+
template <typename Callback>
void ForAllEphemeronEdges(Callback callback) {
for (const HeapObjectHeader* value : ephemeron_edges_) {
@@ -271,10 +275,20 @@ class State final : public StateBase {
}
}
+ template <typename Callback>
+ void ForAllEagerEphemeronEdges(Callback callback) {
+ for (const auto& pair : eager_ephemeron_edges_) {
+ callback(pair.first, pair.second);
+ }
+ }
+
private:
bool is_weak_container_ = false;
// Values that are held alive through ephemerons by this particular key.
std::unordered_set<const HeapObjectHeader*> ephemeron_edges_;
+ // Values that are eagerly traced and held alive through ephemerons by this
+ // particular key.
+ std::unordered_map<const void*, cppgc::TraceCallback> eager_ephemeron_edges_;
};
// Root states are similar to regular states with the difference that they are
@@ -404,6 +418,9 @@ class CppGraphBuilderImpl final {
void VisitForVisibility(State& parent, const TracedReferenceBase&);
void VisitEphemeronForVisibility(const HeapObjectHeader& key,
const HeapObjectHeader& value);
+ void VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc);
void VisitWeakContainerForVisibility(const HeapObjectHeader&);
void VisitRootForGraphBuilding(RootState&, const HeapObjectHeader&,
const cppgc::SourceLocation&);
@@ -421,7 +438,7 @@ class CppGraphBuilderImpl final {
}
void AddEdge(State& parent, const HeapObjectHeader& header,
- const std::string& edge_name = {}) {
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
auto& current = states_.GetExistingState(header);
if (!current.IsVisibleNotDependent()) return;
@@ -443,7 +460,8 @@ class CppGraphBuilderImpl final {
}
}
- void AddEdge(State& parent, const TracedReferenceBase& ref) {
+ void AddEdge(State& parent, const TracedReferenceBase& ref,
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
if (!v8_value.IsEmpty()) {
@@ -451,12 +469,19 @@ class CppGraphBuilderImpl final {
parent.set_node(AddNode(*parent.header()));
}
auto* v8_node = graph_.V8Node(v8_value);
- graph_.AddEdge(parent.get_node(), v8_node);
+ if (!edge_name.empty()) {
+ graph_.AddEdge(parent.get_node(), v8_node,
+ parent.get_node()->InternalizeEdgeName(edge_name));
+ } else {
+ graph_.AddEdge(parent.get_node(), v8_node);
+ }
// References that have a class id set may have their internal fields
// pointing back to the object. Set up a wrapper node for the graph so
// that the snapshot generator can merge the nodes appropriately.
- if (!ref.WrapperClassId()) return;
+ // Even with a set class id, do not set up a wrapper node when the edge
+ // has a specific name.
+ if (!ref.WrapperClassId() || !edge_name.empty()) return;
void* back_reference_object = ExtractEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
@@ -598,8 +623,18 @@ class WeakVisitor : public JSVisitor {
void VisitEphemeron(const void* key, const void* value,
cppgc::TraceDescriptor value_desc) final {
// For ephemerons, the key retains the value.
+    // The key must always be a GarbageCollected object.
+ auto& key_header = HeapObjectHeader::FromObject(key);
+ if (!value_desc.base_object_payload) {
+ // Value does not represent an actual GarbageCollected object but rather
+ // should be traced eagerly.
+ graph_builder_.VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ key_header, value, value_desc);
+ return;
+ }
+ // Regular path where both key and value are GarbageCollected objects.
graph_builder_.VisitEphemeronForVisibility(
- HeapObjectHeader::FromObject(key), HeapObjectHeader::FromObject(value));
+ key_header, HeapObjectHeader::FromObject(value));
}
protected:
@@ -645,7 +680,7 @@ class GraphBuildingVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload), edge_name_);
}
void VisitWeakContainer(const void* object,
cppgc::TraceDescriptor strong_desc,
@@ -655,7 +690,8 @@ class GraphBuildingVisitor final : public JSVisitor {
// container itself.
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(strong_desc.base_object_payload));
+ HeapObjectHeader::FromObject(strong_desc.base_object_payload),
+ edge_name_);
}
void VisitRoot(const void*, cppgc::TraceDescriptor desc,
const cppgc::SourceLocation& loc) final {
@@ -667,12 +703,18 @@ class GraphBuildingVisitor final : public JSVisitor {
const void*, const cppgc::SourceLocation&) final {}
// JS handling.
void Visit(const TracedReferenceBase& ref) final {
- graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref);
+ graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref,
+ edge_name_);
+ }
+
+ void set_edge_name(std::string edge_name) {
+ edge_name_ = std::move(edge_name);
}
private:
CppGraphBuilderImpl& graph_builder_;
const ParentScope& parent_scope_;
+ std::string edge_name_;
};
// Base class for transforming recursion into iteration. Items are processed
@@ -765,6 +807,19 @@ void CppGraphBuilderImpl::VisitForVisibility(State* parent,
}
}
+void CppGraphBuilderImpl::
+ VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc) {
+ auto& key_state = states_.GetOrCreateState(key);
+ // Eagerly trace the value here, effectively marking key as visible and
+ // queuing processing for all reachable values.
+ ParentScope parent_scope(key_state);
+ VisiblityVisitor visitor(*this, parent_scope);
+ value_desc.callback(&visitor, value);
+ key_state.AddEagerEphemeronEdge(value, value_desc.callback);
+}
+
void CppGraphBuilderImpl::VisitEphemeronForVisibility(
const HeapObjectHeader& key, const HeapObjectHeader& value) {
auto& key_state = states_.GetOrCreateState(key);
@@ -820,6 +875,12 @@ void CppGraphBuilderImpl::Run() {
state.ForAllEphemeronEdges([this, &state](const HeapObjectHeader& value) {
AddEdge(state, value, "part of key -> value pair in ephemeron table");
});
+ object_visitor.set_edge_name(
+ "part of key -> value pair in ephemeron table");
+ state.ForAllEagerEphemeronEdges(
+ [&object_visitor](const void* value, cppgc::TraceCallback callback) {
+ callback(&object_visitor, value);
+ });
});
// Add roots.
{
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index d98e2b54bfa..388fa94aab8 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,7 +6,6 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/heap/heap.h"
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index e9da1163e4f..09564055dc8 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,7 +4,6 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
-#include "include/v8.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
diff --git a/chromium/v8/src/heap/cppgc/DEPS b/chromium/v8/src/heap/cppgc/DEPS
new file mode 100644
index 00000000000..37049928d5b
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/chromium/v8/src/heap/cppgc/allocation.cc b/chromium/v8/src/heap/cppgc/allocation.cc
index 22f47039827..ee4693c7f08 100644
--- a/chromium/v8/src/heap/cppgc/allocation.cc
+++ b/chromium/v8/src/heap/cppgc/allocation.cc
@@ -9,20 +9,30 @@
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/object-allocator.h"
+#if defined(__clang__) && !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
+#define CPPGC_FORCE_ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define CPPGC_FORCE_ALWAYS_INLINE
+#endif
+
namespace cppgc {
namespace internal {
STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
+// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
+// fast path.
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(
+CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
+// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
+// fast path.
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(
+CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index,
CustomSpaceIndex space_index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index,
diff --git a/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
index 55ededdc087..b1ce0df00fe 100644
--- a/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
+++ b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -13,6 +13,14 @@
namespace cppgc {
namespace internal {
+CagedHeapLocalData::CagedHeapLocalData(HeapBase& heap_base,
+ PageAllocator& allocator)
+ : heap_base(heap_base) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ age_table.Reset(&allocator);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
#if defined(CPPGC_YOUNG_GENERATION)
static_assert(
@@ -30,7 +38,7 @@ void AgeTable::Reset(PageAllocator* allocator) {
allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
}
-#endif
+#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
index c43ea6e3a58..4ba7a36bea7 100644
--- a/chromium/v8/src/heap/cppgc/caged-heap.cc
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -27,18 +27,17 @@ STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
namespace {
-VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
- DCHECK_NOT_NULL(platform_allocator);
+VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
DCHECK_EQ(0u,
- kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+ kCagedHeapReservationSize % platform_allocator.AllocatePageSize());
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
void* hint = reinterpret_cast<void*>(RoundDown(
- reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ reinterpret_cast<uintptr_t>(platform_allocator.GetRandomMmapAddr()),
kCagedHeapReservationAlignment));
- VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ VirtualMemory memory(&platform_allocator, kCagedHeapReservationSize,
kCagedHeapReservationAlignment, hint);
if (memory.IsReserved()) return memory;
}
@@ -51,7 +50,9 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
public:
CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size)
- : BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
+ : BoundedPageAllocator(page_allocator, start, size, allocate_page_size,
+ v8::base::PageInitializationMode::
+ kAllocatedPagesCanBeUninitialized) {}
bool FreePages(void* address, size_t size) final {
// BoundedPageAllocator is not guaranteed to allocate zeroed page.
@@ -62,7 +63,7 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
// contents. To mitigate this problem, CppgcBoundedPageAllocator clears all
// pages before they are freed. This also includes protected guard pages, so
// CppgcBoundedPageAllocator needs to update permissions before clearing.
- SetPermissions(address, size, Permission::kReadWrite);
+ CHECK(SetPermissions(address, size, Permission::kReadWrite));
memset(address, 0, size);
return v8::base::BoundedPageAllocator::FreePages(address, size);
}
@@ -70,23 +71,19 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
} // namespace
-CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
: reserved_area_(ReserveCagedHeap(platform_allocator)) {
using CagedAddress = CagedHeap::AllocatorType::Address;
- DCHECK_NOT_NULL(heap_base);
-
- CHECK(platform_allocator->SetPermissions(
+ const bool is_not_oom = platform_allocator.SetPermissions(
reserved_area_.address(),
- RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
- PageAllocator::kReadWrite));
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
+ PageAllocator::kReadWrite);
+ // Failing to commit the reservation means that we are out of memory.
+ CHECK(is_not_oom);
- auto* local_data =
- new (reserved_area_.address()) CagedHeapLocalData(heap_base);
-#if defined(CPPGC_YOUNG_GENERATION)
- local_data->age_table.Reset(platform_allocator);
-#endif
- USE(local_data);
+ new (reserved_area_.address())
+ CagedHeapLocalData(heap_base, platform_allocator);
const CagedAddress caged_heap_start =
RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
@@ -97,7 +94,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
reinterpret_cast<CagedAddress>(reserved_area_.address());
bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
- platform_allocator, caged_heap_start,
+ &platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.h b/chromium/v8/src/heap/cppgc/caged-heap.h
index 7ac34624a0a..89b2f7f112f 100644
--- a/chromium/v8/src/heap/cppgc/caged-heap.h
+++ b/chromium/v8/src/heap/cppgc/caged-heap.h
@@ -22,7 +22,17 @@ class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
- CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+ static uintptr_t OffsetFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ static uintptr_t BaseFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ ~(kCagedHeapReservationAlignment - 1);
+ }
+
+ CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
@@ -37,13 +47,13 @@ class CagedHeap final {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
}
- static uintptr_t OffsetFromAddress(void* address) {
- return reinterpret_cast<uintptr_t>(address) &
- (kCagedHeapReservationAlignment - 1);
+ bool IsOnHeap(const void* address) const {
+ return reinterpret_cast<void*>(BaseFromAddress(address)) ==
+ reserved_area_.address();
}
private:
- VirtualMemory reserved_area_;
+ const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
};
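
OffsetFromAddress() and BaseFromAddress() above, together with the new IsOnHeap() check, are power-of-two masking against the reservation alignment. A worked example, assuming a 4 GiB alignment for illustration (the real constant is kCagedHeapReservationAlignment):

    #include <cassert>
    #include <cstdint>

    // Illustrative only: a 4 GiB power-of-two cage alignment stands in for
    // kCagedHeapReservationAlignment.
    constexpr uint64_t kCageAlignment = uint64_t{4} * 1024 * 1024 * 1024;

    constexpr uint64_t OffsetFromAddress(uint64_t address) {
      return address & (kCageAlignment - 1);
    }

    constexpr uint64_t BaseFromAddress(uint64_t address) {
      return address & ~(kCageAlignment - 1);
    }

    int main() {
      constexpr uint64_t cage_base = 0x7f2a00000000;  // 4 GiB-aligned reservation
      constexpr uint64_t object = cage_base + 0x1234;
      static_assert(BaseFromAddress(object) == cage_base, "base recovers the cage");
      static_assert(OffsetFromAddress(object) == 0x1234, "offset stays in the cage");
      assert(BaseFromAddress(object) == cage_base);
      return 0;
    }
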
diff --git a/chromium/v8/src/heap/cppgc/gc-info.cc b/chromium/v8/src/heap/cppgc/gc-info.cc
index de57805dcbf..4c555106fd2 100644
--- a/chromium/v8/src/heap/cppgc/gc-info.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info.cc
@@ -3,19 +3,86 @@
// found in the LICENSE file.
#include "include/cppgc/internal/gc-info.h"
+
+#include "include/cppgc/internal/name-trait.h"
#include "include/v8config.h"
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
namespace internal {
-GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
- FinalizationCallback finalization_callback,
- TraceCallback trace_callback,
- NameCallback name_callback, bool has_v_table) {
+namespace {
+
+HeapObjectName GetHiddenName(const void*) {
+ return {NameProvider::kHiddenName, true};
+}
+
+} // namespace
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
- {finalization_callback, trace_callback, name_callback, has_v_table});
+ {finalization_callback, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, false});
}
} // namespace internal
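
The overload fan-out above lets the header-side trait register a GCInfo with exactly the callbacks a type needs (finalizer and/or name callback, polymorphic or not) without passing redundant null pointers around at runtime. A hypothetical compile-time dispatcher in the same spirit; the names below are illustrative and not cppgc's actual trait machinery:

    #include <type_traits>

    // Invented GCInfo shape for illustration only.
    struct GCInfo {
      void (*finalize)(void*);
      void (*trace)(void*);
      bool polymorphic;
    };

    template <typename T>
    void TraceT(void* object) {
      static_cast<T*>(object)->Trace();
    }

    template <typename T>
    void FinalizeT(void* object) {
      static_cast<T*>(object)->~T();
    }

    template <typename T>
    GCInfo MakeGCInfo() {
      GCInfo info{nullptr, &TraceT<T>, std::is_polymorphic<T>::value};
      // Only wire up a finalizer when destruction actually does something,
      // mirroring the overloads above that register nullptr finalizers.
      if constexpr (!std::is_trivially_destructible<T>::value) {
        info.finalize = &FinalizeT<T>;
      }
      return info;
    }

    struct PlainObject {
      void Trace() {}
    };

    int main() {
      const GCInfo info = MakeGCInfo<PlainObject>();
      return (info.finalize == nullptr && !info.polymorphic) ? 0 : 1;
    }
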
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
index c89c2842f9b..db16019b61f 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.cc
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -17,6 +17,7 @@
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -56,23 +57,26 @@ HeapBase::HeapBase(
StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
+ oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
#if defined(LEAK_SANITIZER)
lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
platform_->GetPageAllocator())),
#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
- caged_heap_(this, page_allocator()),
- page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+ caged_heap_(*this, *page_allocator()),
+ page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
+ *oom_handler_.get())),
#else // !CPPGC_CAGED_HEAP
- page_backend_(std::make_unique<PageBackend>(page_allocator())),
+ page_backend_(std::make_unique<PageBackend>(*page_allocator(),
+ *oom_handler_.get())),
#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
compactor_(raw_heap_),
- object_allocator_(&raw_heap_, page_backend_.get(),
- stats_collector_.get()),
+ object_allocator_(raw_heap_, *page_backend_, *stats_collector_,
+ *prefinalizer_handler_),
sweeper_(*this),
stack_support_(stack_support) {
stats_collector_->RegisterObserver(
@@ -96,10 +100,17 @@ size_t HeapBase::ObjectPayloadSize() const {
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
if (marker_) marker_->AdvanceMarkingOnAllocation();
}
-void HeapBase::ExecutePreFinalizers() {
+
+size_t HeapBase::ExecutePreFinalizers() {
+#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ // Allocations in pre finalizers should not trigger another GC.
+ cppgc::subtle::NoGarbageCollectionScope no_gc_scope(*this);
+#else
// Pre finalizers are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
+#endif // CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
prefinalizer_handler_->InvokePreFinalizers();
+ return prefinalizer_handler_->ExtractBytesAllocatedInPrefinalizers();
}
void HeapBase::Terminate() {
@@ -110,6 +121,7 @@ void HeapBase::Terminate() {
constexpr size_t kMaxTerminationGCs = 20;
size_t gc_count = 0;
+ bool more_termination_gcs_needed = false;
do {
CHECK_LT(gc_count++, kMaxTerminationGCs);
@@ -132,7 +144,14 @@ void HeapBase::Terminate() {
{Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
sweeper().NotifyDoneIfNeeded();
- } while (strong_persistent_region_.NodesInUse() > 0);
+ more_termination_gcs_needed =
+ strong_persistent_region_.NodesInUse() ||
+ weak_persistent_region_.NodesInUse() || [this]() {
+ PersistentRegionLock guard;
+ return strong_cross_thread_persistent_region_.NodesInUse() ||
+ weak_cross_thread_persistent_region_.NodesInUse();
+ }();
+ } while (more_termination_gcs_needed);
object_allocator().Terminate();
disallow_gc_scope_++;
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
index 91f99b39cc3..f350a99d012 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.h
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -65,6 +66,7 @@ namespace testing {
class TestWithHeap;
} // namespace testing
+class FatalOutOfMemoryHandler;
class PageBackend;
class PreFinalizerHandler;
class StatsCollector;
@@ -95,6 +97,11 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
cppgc::Platform* platform() { return platform_.get(); }
const cppgc::Platform* platform() const { return platform_.get(); }
+ FatalOutOfMemoryHandler& oom_handler() { return *oom_handler_.get(); }
+ const FatalOutOfMemoryHandler& oom_handler() const {
+ return *oom_handler_.get();
+ }
+
PageBackend* page_backend() { return page_backend_.get(); }
const PageBackend* page_backend() const { return page_backend_.get(); }
@@ -199,6 +206,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stats_collector_->SetMetricRecorder(std::move(histogram_recorder));
}
+ int GetCreationThreadId() const { return creation_thread_id_; }
+
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -208,12 +217,14 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool IsMarking() const { return marker_.get(); }
- void ExecutePreFinalizers();
+  // Returns the number of bytes allocated while executing prefinalizers.
+ size_t ExecutePreFinalizers();
PageAllocator* page_allocator() const;
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
+ std::unique_ptr<FatalOutOfMemoryHandler> oom_handler_;
#if defined(LEAK_SANITIZER)
std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
@@ -261,6 +272,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool in_atomic_pause_ = false;
+ int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
+
friend class MarkerBase::IncrementalMarkingTask;
friend class testing::TestWithHeap;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index a50d115e52b..f1d67df8b55 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -91,6 +91,8 @@ class HeapObjectHeader {
void Unmark();
inline bool TryMarkAtomic();
+ inline void MarkNonAtomic();
+
template <AccessMode = AccessMode::kNonAtomic>
bool IsYoung() const;
@@ -118,14 +120,14 @@ class HeapObjectHeader {
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
- using SizeField = MarkBitField::Next<size_t, 15>;
- return SizeField::decode(encoded) * kAllocationGranularity;
+ using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
+ return SizeFieldImpl::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
- using SizeField = MarkBitField::Next<size_t, 15>;
- return SizeField::encode(size / kAllocationGranularity);
+ using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
+ return SizeFieldImpl::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
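
DecodeSize()/EncodeSize() above keep storing the object size divided by the allocation granularity so that it fits into the 15 bits next to the mark bit; only the local alias is renamed. A simplified sketch of that arithmetic, ignoring the bit-field packing and assuming an 8-byte kAllocationGranularity (the real value is configuration-dependent):

    #include <cstddef>
    #include <cstdint>

    // Assumed granularity for illustration only.
    constexpr std::size_t kAllocationGranularity = 8;

    constexpr uint16_t EncodeSize(std::size_t size) {
      return static_cast<uint16_t>(size / kAllocationGranularity);
    }

    constexpr std::size_t DecodeSize(uint16_t encoded) {
      return static_cast<std::size_t>(encoded) * kAllocationGranularity;
    }

    // 15 bits of encoded size cover objects up to (2^15 - 1) * 8 = 262136 bytes;
    // anything larger is a large object with a dedicated size marker.
    static_assert(DecodeSize(EncodeSize(64)) == 64, "round-trips granular sizes");
    static_assert(EncodeSize(262136) == 0x7fff, "largest encodable normal size");

    int main() { return 0; }
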
@@ -266,6 +268,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
+void HeapObjectHeader::MarkNonAtomic() {
+ DCHECK(!IsMarked<AccessMode::kNonAtomic>());
+ encoded_low_ |= MarkBitField::encode(true);
+}
+
template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
diff --git a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
index 5833211fcb3..7c91bbd4f53 100644
--- a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
+++ b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -73,7 +73,7 @@ void FinalizeSpace(HeapStatistics* stats,
}
void RecordObjectType(
- std::unordered_map<const char*, size_t>& type_map,
+ std::unordered_map<const void*, size_t>& type_map,
std::vector<HeapStatistics::ObjectStatsEntry>& object_statistics,
HeapObjectHeader* header, size_t object_size) {
if (!NameProvider::HideInternalNames()) {
@@ -109,7 +109,7 @@ HeapStatistics HeapStatisticsCollector::CollectDetailedStatistics(
if (!NameProvider::HideInternalNames()) {
stats.type_names.resize(type_name_to_index_map_.size());
for (auto& it : type_name_to_index_map_) {
- stats.type_names[it.second] = it.first;
+ stats.type_names[it.second] = reinterpret_cast<const char*>(it.first);
}
}
diff --git a/chromium/v8/src/heap/cppgc/heap-statistics-collector.h b/chromium/v8/src/heap/cppgc/heap-statistics-collector.h
index c0b1fe7c632..1e492bfe7f6 100644
--- a/chromium/v8/src/heap/cppgc/heap-statistics-collector.h
+++ b/chromium/v8/src/heap/cppgc/heap-statistics-collector.h
@@ -30,10 +30,10 @@ class HeapStatisticsCollector : private HeapVisitor<HeapStatisticsCollector> {
HeapStatistics::SpaceStatistics* current_space_stats_ = nullptr;
HeapStatistics::PageStatistics* current_page_stats_ = nullptr;
// Index from type name to final index in `HeapStats::type_names`.
- // Canonicalizing based on `const char*` assuming stable addresses. If the
+ // Canonicalizing based on `const void*` assuming stable addresses. If the
// implementation of `NameProvider` decides to return different type name
// c-strings, the final outcome is less compact.
- std::unordered_map<const char*, size_t> type_name_to_index_map_;
+ std::unordered_map<const void*, size_t> type_name_to_index_map_;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index 58252a20ab4..a4e514a7c28 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -187,13 +187,17 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
marker_->FinishMarking(config_.stack_state);
}
marker_.reset();
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this);
- verifier.Run(config_.stack_state, stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
+ verifier.Run(
+ config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+#ifndef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
#endif
+ USE(bytes_allocated_in_prefinalizers);
subtle::NoGarbageCollectionScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index 0e5d9ec8f1e..a7a54f68bec 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -38,7 +38,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -52,7 +52,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -214,7 +214,7 @@ void MarkerBase::StartMarking() {
is_marking_ = true;
if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
// Performing incremental or concurrent marking.
@@ -245,12 +245,6 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
config_.marking_type = MarkingConfig::MarkingType::kAtomic;
mutator_marking_state_.set_in_atomic_pause();
- // Lock guards against changes to {Weak}CrossThreadPersistent handles, that
- // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
- // converted into a CrossThreadPersistent which requires that the handle
- // is either cleared or the object is retained.
- g_process_mutex.Pointer()->Lock();
-
{
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
@@ -308,6 +302,7 @@ void MarkerBase::ProcessWeakness() {
heap().GetWeakPersistentRegion().Trace(&visitor());
// Processing cross-thread handles requires taking the process lock.
g_process_mutex.Get().AssertHeld();
+ CHECK(visited_cross_thread_persistents_in_atomic_pause_);
heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
@@ -337,13 +332,6 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
heap().GetStrongPersistentRegion().Trace(&visitor());
}
- if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
- StatsCollector::DisabledScope inner_stats_scope(
- heap().stats_collector(),
- StatsCollector::kMarkVisitCrossThreadPersistents);
- g_process_mutex.Get().AssertHeld();
- heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
- }
}
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
@@ -356,6 +344,24 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
}
+bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
+ if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
+ visited_cross_thread_persistents_in_atomic_pause_)
+ return false;
+
+ StatsCollector::DisabledScope inner_stats_scope(
+ heap().stats_collector(),
+ StatsCollector::kMarkVisitCrossThreadPersistents);
+  // The lock guards against changes to {Weak}CrossThreadPersistent handles
+  // that may conflict with marking. E.g., a WeakCrossThreadPersistent may be
+ // converted into a CrossThreadPersistent which requires that the handle
+ // is either cleared or the object is retained.
+ g_process_mutex.Pointer()->Lock();
+ heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
+ visited_cross_thread_persistents_in_atomic_pause_ = true;
+ return (heap().GetStrongCrossThreadPersistentRegion().NodesInUse() > 0);
+}
+
void MarkerBase::ScheduleIncrementalMarkingTask() {
DCHECK(platform_);
if (!foreground_task_runner_ || incremental_marking_handle_) return;
@@ -400,8 +406,13 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
heap().stats_collector(),
StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
max_duration.InMillisecondsF());
- is_done = ProcessWorklistsWithDeadline(
- marked_bytes_limit, v8::base::TimeTicks::Now() + max_duration);
+ const auto deadline = v8::base::TimeTicks::Now() + max_duration;
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ if (is_done && VisitCrossThreadPersistentsIfNeeded()) {
+ // Both limits are absolute and hence can be passed along without further
+ // adjustment.
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -515,7 +526,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
saved_did_discover_new_ephemeron_pairs =
mutator_marking_state_.DidDiscoverNewEphemeronPairs();
{
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
diff --git a/chromium/v8/src/heap/cppgc/marker.h b/chromium/v8/src/heap/cppgc/marker.h
index 1b41d0b6e89..c18973e2354 100644
--- a/chromium/v8/src/heap/cppgc/marker.h
+++ b/chromium/v8/src/heap/cppgc/marker.h
@@ -164,6 +164,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void VisitRoots(MarkingConfig::StackState);
+ bool VisitCrossThreadPersistentsIfNeeded();
+
void MarkNotFullyConstructedObjects();
void ScheduleIncrementalMarkingTask();
@@ -186,6 +188,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
bool main_marking_disabled_for_testing_{false};
+ bool visited_cross_thread_persistents_in_atomic_pause_{false};
friend class MarkerFactory;
};
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.cc b/chromium/v8/src/heap/cppgc/marking-verifier.cc
index 4d2ebcff1dc..0dbda1159cb 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.cc
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.cc
@@ -21,9 +21,9 @@ MarkingVerifierBase::MarkingVerifierBase(
verification_state_(verification_state),
visitor_(std::move(visitor)) {}
-void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
- uintptr_t stack_end,
- size_t expected_marked_bytes) {
+void MarkingVerifierBase::Run(
+ Heap::Config::StackState stack_state, uintptr_t stack_end,
+ v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
@@ -38,9 +38,9 @@ void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
in_construction_objects_heap_.find(header));
}
}
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- CHECK_EQ(expected_marked_bytes, found_marked_bytes_);
-#endif // CPPGC_VERIFY_LIVE_BYTES
+ if (expected_marked_bytes) {
+ CHECK_EQ(expected_marked_bytes.value(), found_marked_bytes_);
+ }
}
void VerificationState::VerifyMarked(const void* base_object_payload) const {
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.h b/chromium/v8/src/heap/cppgc/marking-verifier.h
index 72d49daa768..ca588f40d80 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.h
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/optional.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -40,7 +41,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState, uintptr_t, size_t);
+ void Run(Heap::Config::StackState, uintptr_t, v8::base::Optional<size_t>);
protected:
MarkingVerifierBase(HeapBase&, VerificationState&,
diff --git a/chromium/v8/src/heap/cppgc/memory.cc b/chromium/v8/src/heap/cppgc/memory.cc
index aa3baeaa8a0..6d81957325d 100644
--- a/chromium/v8/src/heap/cppgc/memory.cc
+++ b/chromium/v8/src/heap/cppgc/memory.cc
@@ -12,7 +12,7 @@ namespace cppgc {
namespace internal {
void NoSanitizeMemset(void* address, char c, size_t bytes) {
- volatile Address base = reinterpret_cast<Address>(address);
+ volatile uint8_t* const base = static_cast<uint8_t*>(address);
for (size_t i = 0; i < bytes; ++i) {
base[i] = c;
}
diff --git a/chromium/v8/src/heap/cppgc/memory.h b/chromium/v8/src/heap/cppgc/memory.h
index adc2ce9bb38..3b9f6cb62c2 100644
--- a/chromium/v8/src/heap/cppgc/memory.h
+++ b/chromium/v8/src/heap/cppgc/memory.h
@@ -117,7 +117,11 @@ V8_INLINE void CheckMemoryIsInaccessible(const void* address, size_t size) {
static_assert(!CheckMemoryIsInaccessibleIsNoop(),
"CheckMemoryIsInaccessibleIsNoop() needs to reflect "
"CheckMemoryIsInaccessible().");
- ASAN_CHECK_MEMORY_REGION_IS_POISONED(address, size);
+ // Only check if memory is poisoned on 64 bit, since there we make sure that
+  // object sizes and alignments are a multiple of the shadow memory granularity.
+#if defined(V8_TARGET_ARCH_64_BIT)
+ ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(address, size);
+#endif
ASAN_UNPOISON_MEMORY_REGION(address, size);
CheckMemoryIsZero(address, size);
ASAN_POISON_MEMORY_REGION(address, size);
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index 191e73e6d8e..43e7c9b79a0 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -16,6 +16,7 @@
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
@@ -39,7 +40,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
? RoundUp(offset_end, kEntrySize)
: RoundDown(offset_end, kEntrySize);
- auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ auto& age_table = page->heap().caged_heap().local_data().age_table;
for (auto offset = young_offset_begin; offset < young_offset_end;
offset += AgeTable::kEntrySizeInBytes) {
age_table[offset] = AgeTable::Age::kYoung;
@@ -58,9 +59,11 @@ void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
// No need for SetMemoryInaccessible() as LAB memory is retrieved as free
// inaccessible memory.
space.free_list().Add({start, size});
+ // Concurrent marking may be running while the LAB is set up next to a live
+ // object sharing the same cell in the bitmap.
NormalPage::From(BasePage::FromPayload(start))
->object_start_bitmap()
- .SetBit(start);
+ .SetBit<AccessMode::kAtomic>(start);
}
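
The SetBit/ClearBit calls above switch to AccessMode::kAtomic because a background marker may be reading the object-start bitmap word while the mutator sets up a linear allocation buffer that shares that word. A minimal sketch of an atomic bitmap of that shape, simplified in that it uses raw bit indices rather than mapping payload addresses as the real ObjectStartBitmap does:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in: one bit per allocation granule, updated with relaxed
    // atomic read-modify-write so concurrent readers never see torn words.
    class AtomicBitmap {
     public:
      AtomicBitmap() {
        for (auto& word : words_) word.store(0, std::memory_order_relaxed);
      }
      void SetBit(std::size_t index) {
        words_[index / 64].fetch_or(uint64_t{1} << (index % 64),
                                    std::memory_order_relaxed);
      }
      void ClearBit(std::size_t index) {
        words_[index / 64].fetch_and(~(uint64_t{1} << (index % 64)),
                                     std::memory_order_relaxed);
      }
      bool CheckBit(std::size_t index) const {
        return words_[index / 64].load(std::memory_order_relaxed) &
               (uint64_t{1} << (index % 64));
      }

     private:
      static constexpr std::size_t kWords = 128;
      std::atomic<uint64_t> words_[kWords];
    };

    int main() {
      AtomicBitmap bitmap;
      bitmap.SetBit(42);    // e.g. a live object's start
      bitmap.ClearBit(43);  // e.g. the start of a fresh LAB on the same word
      return bitmap.CheckBit(42) ? 0 : 1;
    }
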
void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
@@ -77,21 +80,23 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
DCHECK_NOT_NULL(new_buffer);
stats_collector.NotifyAllocation(new_size);
auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
- page->object_start_bitmap().ClearBit(new_buffer);
+ // Concurrent marking may be running while the LAB is set up next to a live
+ // object sharing the same cell in the bitmap.
+ page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
}
}
-void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
- StatsCollector* stats_collector, size_t size,
+void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
+ StatsCollector& stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(*page_backend, *space, size);
- space->AddPage(page);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space.AddPage(page);
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
- stats_collector->NotifyAllocation(size);
+ stats_collector.NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
return header->ObjectStart();
@@ -101,17 +106,29 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
constexpr size_t ObjectAllocator::kSmallestSpaceSize;
-ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector)
+ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler)
: raw_heap_(heap),
page_backend_(page_backend),
- stats_collector_(stats_collector) {}
+ stats_collector_(stats_collector),
+ prefinalizer_handler_(prefinalizer_handler) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
- stats_collector_->NotifySafePointForConservativeCollection();
- raw_heap_->heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ stats_collector_.NotifySafePointForConservativeCollection();
+ raw_heap_.heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
+ // Objects allocated during pre finalizers should be allocated as black
+ // since marking is already done. Atomics are not needed because there is
+ // no concurrent marking in the background.
+ HeapObjectHeader::FromObject(memory).MarkNonAtomic();
+ // Resetting the allocation buffer forces all further allocations in pre
+ // finalizers to go through this slow path.
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
+ }
return memory;
}
@@ -124,8 +141,8 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 1. If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
- auto* large_space = &LargePageSpace::From(
- *raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ auto& large_space = LargePageSpace::From(
+ *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
return AllocateLargeObject(page_backend_, large_space, stats_collector_,
size, gcinfo);
}
@@ -137,7 +154,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
- Sweeper& sweeper = raw_heap_->heap()->sweeper();
+ Sweeper& sweeper = raw_heap_.heap()->sweeper();
// TODO(chromium:1056170): Investigate whether this should be a loop which
// would result in more agressive re-use of memory at the expense of
// potentially larger allocation time.
@@ -159,11 +176,11 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
- auto* new_page = NormalPage::Create(*page_backend_, space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
space.AddPage(new_page);
// 6. Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
new_page->PayloadStart(),
new_page->PayloadSize());
@@ -182,13 +199,12 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
// Assume discarded memory on that page is now zero.
auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
if (page.discarded_memory()) {
- stats_collector_->DecrementDiscardedMemory(page.discarded_memory());
+ stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
page.ResetDiscardedMemory();
}
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
- static_cast<Address>(entry.address),
- entry.size);
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
return AllocateObjectOnSpace(space, size, gcinfo);
}
@@ -196,20 +212,20 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
void ObjectAllocator::ResetLinearAllocationBuffers() {
class Resetter : public HeapVisitor<Resetter> {
public:
- explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+ explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}
bool VisitLargePageSpace(LargePageSpace&) { return true; }
bool VisitNormalPageSpace(NormalPageSpace& space) {
- ReplaceLinearAllocationBuffer(space, *stats_collector_, nullptr, 0);
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
return true;
}
private:
- StatsCollector* stats_collector_;
+ StatsCollector& stats_collector_;
} visitor(stats_collector_);
- visitor.Traverse(*raw_heap_);
+ visitor.Traverse(raw_heap_);
}
void ObjectAllocator::Terminate() {
@@ -217,7 +233,7 @@ void ObjectAllocator::Terminate() {
}
bool ObjectAllocator::in_disallow_gc_scope() const {
- return raw_heap_->heap()->in_disallow_gc_scope();
+ return raw_heap_.heap()->in_disallow_gc_scope();
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index dd0035cfe9d..c02115b6670 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
class ObjectAllocator;
+class PreFinalizerHandler;
} // namespace internal
class V8_EXPORT AllocationHandle {
@@ -37,8 +38,9 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
static constexpr size_t kSmallestSpaceSize = 32;
- ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector);
+ ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
@@ -63,9 +65,10 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace&, size_t, GCInfoIndex);
- RawHeap* raw_heap_;
- PageBackend* page_backend_;
- StatsCollector* stats_collector_;
+ RawHeap& raw_heap_;
+ PageBackend& page_backend_;
+ StatsCollector& stats_collector_;
+ PreFinalizerHandler& prefinalizer_handler_;
};
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
@@ -74,7 +77,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
GetInitialSpaceIndexForSize(allocation_size);
- return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_->Space(type)),
+ return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_.Space(type)),
allocation_size, gcinfo);
}
@@ -84,7 +87,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
- NormalPageSpace::From(*raw_heap_->CustomSpace(space_index)),
+ NormalPageSpace::From(*raw_heap_.CustomSpace(space_index)),
allocation_size, gcinfo);
}
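Much of this change is the same mechanical refactor seen in this header: members that can never be null move from pointers to references, bound once at construction. A toy sketch of the pattern with my own types:

class Stats {};
class Heap {};

class Allocator {
 public:
  // Bound once at construction; there is no null state to check at call sites.
  Allocator(Heap& heap, Stats& stats) : heap_(heap), stats_(stats) {}

 private:
  Heap& heap_;    // previously Heap*, dereferenced as raw_heap_->...
  Stats& stats_;  // previously Stats*
};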
diff --git a/chromium/v8/src/heap/cppgc/page-memory.cc b/chromium/v8/src/heap/cppgc/page-memory.cc
index 49b44aff91c..ed76f903e8d 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.cc
+++ b/chromium/v8/src/heap/cppgc/page-memory.cc
@@ -6,17 +6,21 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
namespace internal {
namespace {
-void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Unprotect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kReadWrite));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
} else {
// No protection in case the allocator cannot commit at the required
// granularity. Only protect if the allocator supports committing at that
@@ -24,53 +28,66 @@ void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
//
// The allocator needs to support committing the overall range.
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kReadWrite));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
}
}
-void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Protect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
// Swap the same region, providing the OS with a chance for fast lookup and
// change.
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kNoAccess));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
} else {
// See Unprotect().
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kNoAccess));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
}
}
-MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
size_t allocation_size) {
void* region_memory =
- allocator->AllocatePages(nullptr, allocation_size, kPageSize,
- PageAllocator::Permission::kNoAccess);
+ allocator.AllocatePages(nullptr, allocation_size, kPageSize,
+ PageAllocator::Permission::kNoAccess);
+ if (!region_memory) {
+ oom_handler("Oilpan: Reserving memory.");
+ }
const MemoryRegion reserved_region(static_cast<Address>(region_memory),
allocation_size);
DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
return reserved_region;
}
-void FreeMemoryRegion(PageAllocator* allocator,
+void FreeMemoryRegion(PageAllocator& allocator,
const MemoryRegion& reserved_region) {
// Make sure pages returned to OS are unpoisoned.
ASAN_UNPOISON_MEMORY_REGION(reserved_region.base(), reserved_region.size());
- allocator->FreePages(reserved_region.base(), reserved_region.size());
+ allocator.FreePages(reserved_region.base(), reserved_region.size());
}
} // namespace
-PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
MemoryRegion reserved_region, bool is_large)
: allocator_(allocator),
+ oom_handler_(oom_handler),
reserved_region_(reserved_region),
is_large_(is_large) {}
@@ -81,12 +98,14 @@ PageMemoryRegion::~PageMemoryRegion() {
// static
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
-NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(kPageSize * kNumPageRegions,
- allocator->AllocatePageSize())),
- false) {
+NormalPageMemoryRegion::NormalPageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(kPageSize * kNumPageRegions,
+ allocator.AllocatePageSize())),
+ false) {
#ifdef DEBUG
for (size_t i = 0; i < kNumPageRegions; ++i) {
DCHECK_EQ(false, page_memories_in_use_[i]);
@@ -99,33 +118,35 @@ NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
void NormalPageMemoryRegion::Allocate(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, true);
- Unprotect(allocator_, GetPageMemory(index));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::Free(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, false);
- Protect(allocator_, GetPageMemory(index));
+ Protect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::UnprotectForTesting() {
for (size_t i = 0; i < kNumPageRegions; ++i) {
- Unprotect(allocator_, GetPageMemory(i));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(i));
}
}
-LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
- size_t length)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(length + 2 * kGuardPageSize,
- allocator->AllocatePageSize())),
- true) {}
+LargePageMemoryRegion::LargePageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ size_t length)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(length + 2 * kGuardPageSize,
+ allocator.AllocatePageSize())),
+ true) {}
LargePageMemoryRegion::~LargePageMemoryRegion() = default;
void LargePageMemoryRegion::UnprotectForTesting() {
- Unprotect(allocator_, GetPageMemory());
+ Unprotect(allocator_, oom_handler_, GetPageMemory());
}
PageMemoryRegionTree::PageMemoryRegionTree() = default;
@@ -165,27 +186,33 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
return pair;
}
-PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+PageBackend::PageBackend(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler)
+ : allocator_(allocator), oom_handler_(oom_handler) {}
PageBackend::~PageBackend() = default;
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
+ v8::base::MutexGuard guard(&mutex_);
std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
if (!result.first) {
- auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
+ auto pmr =
+ std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
page_pool_.Add(bucket, pmr.get(),
pmr->GetPageMemory(i).writeable_region().base());
}
page_memory_region_tree_.Add(pmr.get());
normal_page_memory_regions_.push_back(std::move(pmr));
- return AllocateNormalPageMemory(bucket);
+ result = page_pool_.Take(bucket);
+ DCHECK(result.first);
}
result.first->Allocate(result.second);
return result.second;
}
void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
auto* pmr = static_cast<NormalPageMemoryRegion*>(
page_memory_region_tree_.Lookup(writeable_base));
pmr->Free(writeable_base);
@@ -193,15 +220,18 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
}
Address PageBackend::AllocateLargePageMemory(size_t size) {
- auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
+ v8::base::MutexGuard guard(&mutex_);
+ auto pmr =
+ std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
const PageMemory pm = pmr->GetPageMemory();
- Unprotect(allocator_, pm);
+ Unprotect(allocator_, oom_handler_, pm);
page_memory_region_tree_.Add(pmr.get());
large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
return pm.writeable_region().base();
}
void PageBackend::FreeLargePageMemory(Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
page_memory_region_tree_.Remove(pmr);
auto size = large_page_memory_regions_.erase(pmr);
diff --git a/chromium/v8/src/heap/cppgc/page-memory.h b/chromium/v8/src/heap/cppgc/page-memory.h
index 51b2b61f7d7..e5b73318f79 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.h
+++ b/chromium/v8/src/heap/cppgc/page-memory.h
@@ -13,11 +13,14 @@
#include "include/cppgc/platform.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
+class FatalOutOfMemoryHandler;
+
class V8_EXPORT_PRIVATE MemoryRegion final {
public:
MemoryRegion() = default;
@@ -79,9 +82,11 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
virtual void UnprotectForTesting() = 0;
protected:
- PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+ PageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, MemoryRegion,
+ bool);
- PageAllocator* const allocator_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
const MemoryRegion reserved_region_;
const bool is_large_;
};
@@ -91,7 +96,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
public:
static constexpr size_t kNumPageRegions = 10;
- explicit NormalPageMemoryRegion(PageAllocator*);
+ NormalPageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&);
~NormalPageMemoryRegion() override;
const PageMemory GetPageMemory(size_t index) const {
@@ -133,7 +138,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// LargePageMemoryRegion serves a single large PageMemory object.
class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
public:
- LargePageMemoryRegion(PageAllocator*, size_t);
+ LargePageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, size_t);
~LargePageMemoryRegion() override;
const PageMemory GetPageMemory() const {
@@ -193,7 +198,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
public:
- explicit PageBackend(PageAllocator*);
+ PageBackend(PageAllocator&, FatalOutOfMemoryHandler&);
~PageBackend();
// Allocates a normal page from the backend.
@@ -223,7 +228,10 @@ class V8_EXPORT_PRIVATE PageBackend final {
PageBackend& operator=(const PageBackend&) = delete;
private:
- PageAllocator* allocator_;
+ // Guards against concurrent uses of `Lookup()`.
+ mutable v8::base::Mutex mutex_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
NormalPageMemoryPool page_pool_;
PageMemoryRegionTree page_memory_region_tree_;
std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
@@ -233,8 +241,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
-inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
- return kGuardPageSize % allocator->CommitPageSize() == 0;
+inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
+ return kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
@@ -268,6 +276,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
}
Address PageBackend::Lookup(ConstAddress address) const {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
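A sketch, with stand-in types, of the locking discipline introduced for PageBackend above: every public entry point, including the read-only Lookup(), takes the same mutex, which is declared mutable so const callers can lock it too. (In the diff, the recursive call in AllocateNormalPageMemory is also replaced by a second pool take because the lock is not re-entrant.)

#include <cstddef>
#include <map>
#include <mutex>

class RegionBackend {
 public:
  void* Allocate(std::size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    void* base = ::operator new(size);
    regions_[base] = size;
    return base;
  }

  void Free(void* base) {
    std::lock_guard<std::mutex> guard(mutex_);
    regions_.erase(base);
    ::operator delete(base);
  }

  bool Lookup(void* address) const {
    std::lock_guard<std::mutex> guard(mutex_);  // mutable: const readers lock too
    return regions_.count(address) > 0;
  }

 private:
  mutable std::mutex mutex_;
  std::map<void*, std::size_t> regions_;
};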
diff --git a/chromium/v8/src/heap/cppgc/persistent-node.cc b/chromium/v8/src/heap/cppgc/persistent-node.cc
index 8a3d6cd97cb..37933bbcab0 100644
--- a/chromium/v8/src/heap/cppgc/persistent-node.cc
+++ b/chromium/v8/src/heap/cppgc/persistent-node.cc
@@ -9,15 +9,16 @@
#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/persistent.h"
+#include "src/base/platform/platform.h"
#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
-PersistentRegion::~PersistentRegion() { ClearAllUsedNodes(); }
+PersistentRegionBase::~PersistentRegionBase() { ClearAllUsedNodes(); }
template <typename PersistentBaseClass>
-void PersistentRegion::ClearAllUsedNodes() {
+void PersistentRegionBase::ClearAllUsedNodes() {
for (auto& slots : nodes_) {
for (auto& node : *slots) {
if (!node.IsUsed()) continue;
@@ -35,14 +36,15 @@ void PersistentRegion::ClearAllUsedNodes() {
CPPGC_DCHECK(0u == nodes_in_use_);
}
-template void PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
-template void PersistentRegion::ClearAllUsedNodes<PersistentBase>();
+template void
+PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
+template void PersistentRegionBase::ClearAllUsedNodes<PersistentBase>();
-void PersistentRegion::ClearAllUsedNodes() {
+void PersistentRegionBase::ClearAllUsedNodes() {
ClearAllUsedNodes<PersistentBase>();
}
-size_t PersistentRegion::NodesInUse() const {
+size_t PersistentRegionBase::NodesInUse() const {
#ifdef DEBUG
const size_t accumulated_nodes_in_use_ = std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
@@ -56,7 +58,7 @@ size_t PersistentRegion::NodesInUse() const {
return nodes_in_use_;
}
-void PersistentRegion::EnsureNodeSlots() {
+void PersistentRegionBase::EnsureNodeSlots() {
nodes_.push_back(std::make_unique<PersistentNodeSlots>());
for (auto& node : *nodes_.back()) {
node.InitializeAsFreeNode(free_list_head_);
@@ -64,7 +66,7 @@ void PersistentRegion::EnsureNodeSlots() {
}
}
-void PersistentRegion::Trace(Visitor* visitor) {
+void PersistentRegionBase::Trace(Visitor* visitor) {
free_list_head_ = nullptr;
for (auto& slots : nodes_) {
bool is_empty = true;
@@ -92,6 +94,15 @@ void PersistentRegion::Trace(Visitor* visitor) {
nodes_.end());
}
+PersistentRegion::PersistentRegion()
+ : creation_thread_id_(v8::base::OS::GetCurrentThreadId()) {
+ USE(creation_thread_id_);
+}
+
+void PersistentRegion::CheckIsCreationThread() {
+ DCHECK_EQ(creation_thread_id_, v8::base::OS::GetCurrentThreadId());
+}
+
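A self-contained sketch of the creation-thread check added to PersistentRegion above, with std::thread::id standing in for v8::base::OS::GetCurrentThreadId() and assert() standing in for DCHECK_EQ:

#include <cassert>
#include <thread>

class ThreadAffineRegion {
 public:
  ThreadAffineRegion() : creation_thread_id_(std::this_thread::get_id()) {}

  // Debug-only guard: regular (non-cross-thread) persistents may only be
  // created and destroyed on the thread that owns their region.
  void CheckIsCreationThread() const {
    assert(creation_thread_id_ == std::this_thread::get_id());
  }

 private:
  const std::thread::id creation_thread_id_;
};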
PersistentRegionLock::PersistentRegionLock() {
g_process_mutex.Pointer()->Lock();
}
@@ -107,24 +118,24 @@ void PersistentRegionLock::AssertLocked() {
CrossThreadPersistentRegion::~CrossThreadPersistentRegion() {
PersistentRegionLock guard;
- PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+ PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
nodes_.clear();
- // PersistentRegion destructor will be a noop.
+ // PersistentRegionBase destructor will be a noop.
}
void CrossThreadPersistentRegion::Trace(Visitor* visitor) {
PersistentRegionLock::AssertLocked();
- PersistentRegion::Trace(visitor);
+ PersistentRegionBase::Trace(visitor);
}
size_t CrossThreadPersistentRegion::NodesInUse() const {
// This method does not require a lock.
- return PersistentRegion::NodesInUse();
+ return PersistentRegionBase::NodesInUse();
}
void CrossThreadPersistentRegion::ClearAllUsedNodes() {
PersistentRegionLock::AssertLocked();
- PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+ PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/platform.cc b/chromium/v8/src/heap/cppgc/platform.cc
index 90516d6065e..fd769ae4698 100644
--- a/chromium/v8/src/heap/cppgc/platform.cc
+++ b/chromium/v8/src/heap/cppgc/platform.cc
@@ -5,10 +5,38 @@
#include "include/cppgc/platform.h"
#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
+namespace internal {
+
+void Abort() { v8::base::OS::Abort(); }
+
+void FatalOutOfMemoryHandler::operator()(const std::string& reason,
+ const SourceLocation& loc) const {
+ if (custom_handler_) {
+ (*custom_handler_)(reason, loc, heap_);
+ FATAL("Custom out of memory handler should not have returned");
+ }
+#ifdef DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()),
+ "Oilpan: Out of memory (%s)", reason.c_str());
+#else // !DEBUG
+ V8_Fatal("Oilpan: Out of memory");
+#endif // !DEBUG
+}
+
+void FatalOutOfMemoryHandler::SetCustomHandler(Callback* callback) {
+ custom_handler_ = callback;
+}
+
+} // namespace internal
namespace {
PageAllocator* g_page_allocator = nullptr;
@@ -20,6 +48,17 @@ TracingController* Platform::GetTracingController() {
}
void InitializeProcess(PageAllocator* page_allocator) {
+#if defined(V8_USE_ADDRESS_SANITIZER) && defined(V8_TARGET_ARCH_64_BIT)
+  // Retrieve asan's internal shadow memory granularity and check that Oilpan's
+  // object alignment/sizes are a multiple of this granularity. This is needed
+  // to perform poisoning checks.
+ size_t shadow_scale;
+ __asan_get_shadow_mapping(&shadow_scale, nullptr);
+ DCHECK(shadow_scale);
+ const size_t poisoning_granularity = 1 << shadow_scale;
+ CHECK_EQ(0u, internal::kAllocationGranularity % poisoning_granularity);
+#endif
+
CHECK(!g_page_allocator);
internal::GlobalGCInfoTable::Initialize(page_allocator);
g_page_allocator = page_allocator;
@@ -27,9 +66,4 @@ void InitializeProcess(PageAllocator* page_allocator) {
void ShutdownProcess() { g_page_allocator = nullptr; }
-namespace internal {
-
-void Abort() { v8::base::OS::Abort(); }
-
-} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/platform.h b/chromium/v8/src/heap/cppgc/platform.h
new file mode 100644
index 00000000000..2fba1ada1ba
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/platform.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PLATFORM_H_
+#define V8_HEAP_CPPGC_PLATFORM_H_
+
+#include <string>
+
+#include "include/cppgc/source-location.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapBase;
+
+class V8_EXPORT_PRIVATE FatalOutOfMemoryHandler final {
+ public:
+ using Callback = void(const std::string&, const SourceLocation&, HeapBase*);
+
+ FatalOutOfMemoryHandler() = default;
+ explicit FatalOutOfMemoryHandler(HeapBase* heap) : heap_(heap) {}
+
+ [[noreturn]] void operator()(
+ const std::string& reason = std::string(),
+ const SourceLocation& = SourceLocation::Current()) const;
+
+ void SetCustomHandler(Callback*);
+
+ // Disallow copy/move.
+ FatalOutOfMemoryHandler(const FatalOutOfMemoryHandler&) = delete;
+ FatalOutOfMemoryHandler& operator=(const FatalOutOfMemoryHandler&) = delete;
+
+ private:
+ HeapBase* heap_ = nullptr;
+ Callback* custom_handler_ = nullptr;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PLATFORM_H_
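The new header above backs the CHECK-to-handler conversion in page-memory.cc. Below is a minimal analogue with my own types (not the cppgc API) of how a failing platform call now reports through a [[noreturn]] out-of-memory handler that can first dispatch to an embedder-provided callback:

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <string>

class OutOfMemoryHandler {
 public:
  using Callback = void(const std::string& reason);

  void SetCustomHandler(Callback* callback) { custom_handler_ = callback; }

  [[noreturn]] void operator()(const std::string& reason) const {
    if (custom_handler_) (*custom_handler_)(reason);
    // A custom handler is expected not to return; fall through to abort.
    std::fprintf(stderr, "Oilpan-like OOM: %s\n", reason.c_str());
    std::abort();
  }

 private:
  Callback* custom_handler_ = nullptr;
};

// Stand-in for PageAllocator::SetPermissions(), which may fail.
bool SetPermissions(void*, std::size_t) { return false; }

void Unprotect(void* base, std::size_t size, const OutOfMemoryHandler& oom) {
  if (!SetPermissions(base, size)) {
    oom("Unprotecting memory.");  // previously CHECK(SetPermissions(...))
  }
}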
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index 3c7cb617611..b50f96d70e5 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -30,8 +30,8 @@ bool IsOnStack(const void* address) {
} // namespace
-void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
- bool points_to_payload) {
+void SameThreadEnabledCheckingPolicyBase::CheckPointerImpl(
+ const void* ptr, bool points_to_payload, bool check_off_heap_assignments) {
// `ptr` must not reside on stack.
DCHECK(!IsOnStack(ptr));
auto* base_page = BasePage::FromPayload(ptr);
@@ -41,12 +41,14 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
// References cannot change their heap association which means that state is
// immutable once it is set.
+ bool is_on_heap = true;
if (!heap_) {
heap_ = &base_page->heap();
if (!heap_->page_backend()->Lookup(reinterpret_cast<Address>(this))) {
// If `this` is not contained within the heap of `ptr`, we must deal with
// an on-stack or off-heap reference. For both cases there should be no
// heap registered.
+ is_on_heap = false;
CHECK(!HeapRegistry::TryFromManagedPointer(this));
}
}
@@ -54,6 +56,8 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
// Member references should never mix heaps.
DCHECK_EQ(heap_, &base_page->heap());
+ DCHECK_EQ(heap_->GetCreationThreadId(), v8::base::OS::GetCurrentThreadId());
+
// Header checks.
const HeapObjectHeader* header = nullptr;
if (points_to_payload) {
@@ -68,20 +72,24 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
DCHECK(!header->IsFree());
}
-#ifdef CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
- if (heap_->prefinalizer_handler()->IsInvokingPreFinalizers()) {
- // During prefinalizers invocation, check that |ptr| refers to a live object
- // and that it is assigned to a live slot.
- DCHECK(header->IsMarked());
- // Slot can be in a large object.
- const auto* slot_page = BasePage::FromInnerAddress(heap_, this);
- // Off-heap slots (from other heaps or on-stack) are considered live.
- bool slot_is_live =
- !slot_page || slot_page->ObjectHeaderFromInnerAddress(this).IsMarked();
- DCHECK(slot_is_live);
- USE(slot_is_live);
+#ifdef CPPGC_VERIFY_HEAP
+ if (check_off_heap_assignments || is_on_heap) {
+ if (heap_->prefinalizer_handler()->IsInvokingPreFinalizers()) {
+ // Slot can be in a large object.
+ const auto* slot_page = BasePage::FromInnerAddress(heap_, this);
+ // Off-heap slots (from other heaps or on-stack) are considered live.
+ bool slot_is_live =
+ !slot_page ||
+ slot_page->ObjectHeaderFromInnerAddress(this).IsMarked();
+ // During prefinalizers invocation, check that if the slot is live then
+ // |ptr| refers to a live object.
+ DCHECK_IMPLIES(slot_is_live, header->IsMarked());
+ USE(slot_is_live);
+ }
}
-#endif // CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
+#else
+ USE(is_on_heap);
+#endif // CPPGC_VERIFY_HEAP
}
PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
index c05f06f6b00..9f641d6f4b3 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -31,7 +31,8 @@ bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
}
PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
- : heap_(heap)
+ : current_ordered_pre_finalizers_(&ordered_pre_finalizers_),
+ heap_(heap)
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
@@ -44,7 +45,10 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
ordered_pre_finalizers_.end(), pre_finalizer));
- ordered_pre_finalizers_.push_back(pre_finalizer);
+ DCHECK_EQ(current_ordered_pre_finalizers_->end(),
+ std::find(current_ordered_pre_finalizers_->begin(),
+ current_ordered_pre_finalizers_->end(), pre_finalizer));
+ current_ordered_pre_finalizers_->push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -54,6 +58,13 @@ void PreFinalizerHandler::InvokePreFinalizers() {
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
is_invoking_ = true;
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
+ // Reset all LABs to force allocations to the slow path for black allocation.
+ heap_.object_allocator().ResetLinearAllocationBuffers();
+ // Prefinalizers can allocate other objects with prefinalizers, which will
+ // modify ordered_pre_finalizers_ and break iterators.
+ std::vector<PreFinalizer> new_ordered_pre_finalizers;
+ current_ordered_pre_finalizers_ = &new_ordered_pre_finalizers;
ordered_pre_finalizers_.erase(
ordered_pre_finalizers_.begin(),
std::remove_if(ordered_pre_finalizers_.rbegin(),
@@ -62,6 +73,12 @@ void PreFinalizerHandler::InvokePreFinalizers() {
return (pf.callback)(liveness_broker, pf.object);
})
.base());
+ // Newly added objects with prefinalizers will always survive the current GC
+ // cycle, so it's safe to add them after clearing out the older prefinalizers.
+ ordered_pre_finalizers_.insert(ordered_pre_finalizers_.end(),
+ new_ordered_pre_finalizers.begin(),
+ new_ordered_pre_finalizers.end());
+ current_ordered_pre_finalizers_ = &ordered_pre_finalizers_;
is_invoking_ = false;
ordered_pre_finalizers_.shrink_to_fit();
}
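A stand-in sketch of the re-entrancy fix above, showing only the iterator-validity aspect: while the registered pre-finalizers are being invoked, new registrations are redirected into a separate vector so the iteration over the original list stays valid, and the newcomers (which survive the current cycle by construction) are appended afterwards.

#include <functional>
#include <vector>

class Registry {
 public:
  using Callback = std::function<void()>;

  void Register(Callback cb) { current_->push_back(std::move(cb)); }

  void InvokeAll() {
    std::vector<Callback> newly_registered;
    current_ = &newly_registered;      // redirect re-entrant registrations
    for (auto& cb : callbacks_) cb();  // callbacks may call Register()
    callbacks_.clear();
    callbacks_.insert(callbacks_.end(), newly_registered.begin(),
                      newly_registered.end());
    current_ = &callbacks_;            // restore the primary list
  }

 private:
  std::vector<Callback> callbacks_;
  std::vector<Callback>* current_ = &callbacks_;
};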
@@ -74,5 +91,11 @@ bool PreFinalizerHandler::CurrentThreadIsCreationThread() {
#endif
}
+void PreFinalizerHandler::NotifyAllocationInPrefinalizer(size_t size) {
+ DCHECK_GT(bytes_allocated_in_prefinalizers + size,
+ bytes_allocated_in_prefinalizers);
+ bytes_allocated_in_prefinalizers += size;
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
index e91931bf6f2..bc17c99b183 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -27,6 +27,11 @@ class PreFinalizerHandler final {
bool IsInvokingPreFinalizers() const { return is_invoking_; }
+ void NotifyAllocationInPrefinalizer(size_t);
+ size_t ExtractBytesAllocatedInPrefinalizers() {
+ return std::exchange(bytes_allocated_in_prefinalizers, 0);
+ }
+
private:
// Checks that the current thread is the thread that created the heap.
bool CurrentThreadIsCreationThread();
@@ -36,12 +41,16 @@ class PreFinalizerHandler final {
// objects) for an object, by processing the ordered_pre_finalizers_
// back-to-front.
std::vector<PreFinalizer> ordered_pre_finalizers_;
+ std::vector<PreFinalizer>* current_ordered_pre_finalizers_;
HeapBase& heap_;
bool is_invoking_ = false;
#ifdef DEBUG
int creation_thread_id_;
#endif
+
+ // Counter of bytes allocated during prefinalizers.
+ size_t bytes_allocated_in_prefinalizers = 0u;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.cc b/chromium/v8/src/heap/cppgc/stats-collector.cc
index 54b68f4c28f..ce74fe53c8a 100644
--- a/chromium/v8/src/heap/cppgc/stats-collector.cc
+++ b/chromium/v8/src/heap/cppgc/stats-collector.cc
@@ -41,19 +41,19 @@ void StatsCollector::NotifyAllocation(size_t bytes) {
// The current GC may not have been started. This is ok as recording considers
// the whole time range between garbage collections.
allocated_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_ + bytes, live_bytes_);
- live_bytes_ += bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
+ tracked_live_bytes_ += bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifyExplicitFree(size_t bytes) {
// See IncreaseAllocatedObjectSize for lifetime of the counter.
explicitly_freed_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_, bytes);
- live_bytes_ -= bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_, bytes);
+ tracked_live_bytes_ -= bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifySafePointForConservativeCollection() {
@@ -124,9 +124,9 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- live_bytes_ = marked_bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ tracked_live_bytes_ = marked_bytes;
+#endif // CPPGC_VERIFY_HEAP
DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
index d63d297c772..c3d8dbbfc07 100644
--- a/chromium/v8/src/heap/cppgc/stats-collector.h
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -334,9 +334,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// arithmetic for simplicity.
int64_t allocated_bytes_since_safepoint_ = 0;
int64_t explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- size_t live_bytes_ = 0;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ // Tracks live bytes for overflows.
+ size_t tracked_live_bytes_ = 0;
+#endif // CPPGC_VERIFY_HEAP
int64_t memory_allocated_bytes_ = 0;
int64_t memory_freed_bytes_since_end_of_marking_ = 0;
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 482bab1595b..26b4498d6b5 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -817,7 +817,7 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweeper sweeper(&space_states_, platform_,
config_.free_memory_handling);
{
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
stats_collector_, internal_scope_id, "deltaInSeconds",
deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
index e871159b7b2..2f786b99ac6 100644
--- a/chromium/v8/src/heap/cppgc/visitor.cc
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -5,7 +5,9 @@
#include "src/heap/cppgc/visitor.h"
#include "src/base/sanitizer/msan.h"
+#include "src/heap/cppgc/caged-heap.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/object-view.h"
@@ -50,6 +52,11 @@ void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
const void* address) {
+#if defined(CPPGC_CAGED_HEAP)
+ // TODO(chromium:1056170): Add support for SIMD in stack scanning.
+ if (V8_LIKELY(!heap_.caged_heap().IsOnHeap(address))) return;
+#endif
+
const BasePage* page = reinterpret_cast<const BasePage*>(
page_backend_.Lookup(static_cast<ConstAddress>(address)));
diff --git a/chromium/v8/src/heap/cppgc/write-barrier.cc b/chromium/v8/src/heap/cppgc/write-barrier.cc
index 6980e4c8939..007abe3005c 100644
--- a/chromium/v8/src/heap/cppgc/write-barrier.cc
+++ b/chromium/v8/src/heap/cppgc/write-barrier.cc
@@ -132,12 +132,12 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
- if (local_data.heap_base->in_atomic_pause()) return;
+ if (local_data.heap_base.in_atomic_pause()) return;
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
// Record slot.
- local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
+ local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
}
#endif // CPPGC_YOUNG_GENERATION
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index befb1a7e7ac..1f15a7e826d 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -6,7 +6,8 @@
#define V8_HEAP_EMBEDDER_TRACING_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-traced-handle.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
diff --git a/chromium/v8/src/heap/factory-base.cc b/chromium/v8/src/heap/factory-base.cc
index 1e197b93024..576e26507ff 100644
--- a/chromium/v8/src/heap/factory-base.cc
+++ b/chromium/v8/src/heap/factory-base.cc
@@ -627,13 +627,13 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
// Copy left part.
{
const uint8_t* src =
- left->template GetChars<uint8_t>(no_gc, access_guard);
+ left->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
CopyChars(dest, src, left_length);
}
// Copy right part.
{
const uint8_t* src =
- right->template GetChars<uint8_t>(no_gc, access_guard);
+ right->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
CopyChars(dest + left_length, src, right_length);
}
return result;
@@ -645,9 +645,10 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
DisallowGarbageCollection no_gc;
SharedStringAccessGuardIfNeeded access_guard(isolate());
base::uc16* sink = result->GetChars(no_gc, access_guard);
- String::WriteToFlat(*left, sink, 0, left->length(), access_guard);
- String::WriteToFlat(*right, sink + left->length(), 0, right->length(),
+ String::WriteToFlat(*left, sink, 0, left->length(), isolate(),
access_guard);
+ String::WriteToFlat(*right, sink + left->length(), 0, right->length(),
+ isolate(), access_guard);
return result;
}
@@ -809,8 +810,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
(size >
isolate()->heap()->AsHeap()->MaxRegularHeapObjectSize(allocation)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
return result;
}
diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h
index 72d53014fd2..b64db0abf9f 100644
--- a/chromium/v8/src/heap/factory-inl.h
+++ b/chromium/v8/src/heap/factory-inl.h
@@ -71,6 +71,15 @@ ReadOnlyRoots Factory::read_only_roots() const {
return ReadOnlyRoots(isolate());
}
+Factory::CodeBuilder& Factory::CodeBuilder::set_interpreter_data(
+ Handle<HeapObject> interpreter_data) {
+ // This DCHECK requires this function to be in -inl.h.
+ DCHECK(interpreter_data->IsInterpreterData() ||
+ interpreter_data->IsBytecodeArray());
+ interpreter_data_ = interpreter_data;
+ return *this;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 0c80e81f515..ae6c0e27f84 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -100,14 +100,15 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
kind_specific_flags_ == 0
? roots.trampoline_trivial_code_data_container_handle()
: roots.trampoline_promise_rejection_code_data_container_handle());
- DCHECK_EQ(canonical_code_data_container->kind_specific_flags(),
+ DCHECK_EQ(canonical_code_data_container->kind_specific_flags(kRelaxedLoad),
kind_specific_flags_);
data_container = canonical_code_data_container;
} else {
data_container = factory->NewCodeDataContainer(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
- data_container->set_kind_specific_flags(kind_specific_flags_);
+ data_container->set_kind_specific_flags(kind_specific_flags_,
+ kRelaxedStore);
}
// Basic block profiling data for builtins is stored in the JS heap rather
@@ -161,10 +162,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// passing IsPendingAllocation).
raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
raw_code.set_code_data_container(*data_container, kReleaseStore);
- raw_code.set_deoptimization_data(*deoptimization_data_);
if (kind_ == CodeKind::BASELINE) {
+ raw_code.set_bytecode_or_interpreter_data(*interpreter_data_);
raw_code.set_bytecode_offset_table(*position_table_);
} else {
+ raw_code.set_deoptimization_data(*deoptimization_data_);
raw_code.set_source_position_table(*position_table_);
}
raw_code.set_handler_table_offset(
@@ -312,7 +314,8 @@ void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code,
Code::SizeFor(code_desc_.instruction_size() + code_desc_.metadata_size());
int size_to_trim = old_object_size - new_object_size;
DCHECK_GE(size_to_trim, 0);
- heap->UndoLastAllocationAt(code->address() + new_object_size, size_to_trim);
+ heap->CreateFillerObjectAt(code->address() + new_object_size, size_to_trim,
+ ClearRecordedSlots::kNo);
}
MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
@@ -456,16 +459,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return handle(result, isolate());
}
-Handle<BaselineData> Factory::NewBaselineData(
- Handle<Code> code, Handle<HeapObject> function_data) {
- auto baseline_data =
- NewStructInternal<BaselineData>(BASELINE_DATA_TYPE, AllocationType::kOld);
- DisallowGarbageCollection no_gc;
- baseline_data.set_baseline_code(*code);
- baseline_data.set_data(*function_data);
- return handle(baseline_data, isolate());
-}
-
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -512,8 +505,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if ((size > heap->MaxRegularHeapObjectSize(allocation_type)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
@@ -1029,14 +1021,14 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
NewRawOneByteString(length).ToHandleChecked();
DisallowGarbageCollection no_gc;
uint8_t* dest = result->GetChars(no_gc);
- String::WriteToFlat(*str, dest, begin, end);
+ String::WriteToFlat(*str, dest, begin, length);
return result;
} else {
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length).ToHandleChecked();
DisallowGarbageCollection no_gc;
base::uc16* dest = result->GetChars(no_gc);
- String::WriteToFlat(*str, dest, begin, end);
+ String::WriteToFlat(*str, dest, begin, length);
return result;
}
}
@@ -1580,14 +1572,17 @@ Handle<WasmArray> Factory::NewWasmArray(
WasmArray result = WasmArray::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
result.set_length(length);
- for (uint32_t i = 0; i < length; i++) {
- Address address = result.ElementAddress(i);
- if (type->element_type().is_numeric()) {
+ if (type->element_type().is_numeric()) {
+ for (uint32_t i = 0; i < length; i++) {
+ Address address = result.ElementAddress(i);
elements[i]
.Packed(type->element_type())
.CopyTo(reinterpret_cast<byte*>(address));
- } else {
- base::WriteUnalignedValue<Object>(address, *elements[i].to_ref());
+ }
+ } else {
+ for (uint32_t i = 0; i < length; i++) {
+ int offset = result.element_offset(i);
+ TaggedField<Object>::store(result, offset, *elements[i].to_ref());
}
}
return handle(result, isolate());
@@ -1602,11 +1597,13 @@ Handle<WasmStruct> Factory::NewWasmStruct(const wasm::StructType* type,
WasmStruct result = WasmStruct::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
for (uint32_t i = 0; i < type->field_count(); i++) {
- Address address = result.RawFieldAddress(type->field_offset(i));
+ int offset = type->field_offset(i);
if (type->field(i).is_numeric()) {
+ Address address = result.RawFieldAddress(offset);
args[i].Packed(type->field(i)).CopyTo(reinterpret_cast<byte*>(address));
} else {
- base::WriteUnalignedValue<Object>(address, *args[i].to_ref());
+ offset += WasmStruct::kHeaderSize;
+ TaggedField<Object>::store(result, offset, *args[i].to_ref());
}
}
return handle(result, isolate());
@@ -2178,7 +2175,7 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(
CodeDataContainer::cast(New(code_data_container_map(), allocation));
DisallowGarbageCollection no_gc;
data_container.set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
- data_container.set_kind_specific_flags(flags);
+ data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
data_container.AllocateExternalPointerEntries(isolate());
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
@@ -2198,7 +2195,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Builtins::CodeObjectIsExecutable(code->builtin_id());
Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
isolate(), off_heap_entry,
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
generate_jump_to_instruction_stream);
// Trampolines may not contain any metadata since all metadata offsets,
@@ -2256,7 +2253,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container = NewCodeDataContainer(
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
AllocationType::kOld);
Heap* heap = isolate()->heap();
@@ -2872,7 +2869,6 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
map, empty_byte_array(), buffer, byte_offset, byte_length));
JSTypedArray raw = *typed_array;
DisallowGarbageCollection no_gc;
- raw.AllocateExternalPointerEntries(isolate());
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
raw.set_is_length_tracking(false);
@@ -2887,7 +2883,6 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
- obj->AllocateExternalPointerEntries(isolate());
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
@@ -3664,7 +3659,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
}
Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
- Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype);
+ Handle<Map> map =
+ NewMap(JS_CLASS_CONSTRUCTOR_TYPE, JSFunction::kSizeWithPrototype);
{
DisallowGarbageCollection no_gc;
Map raw_map = *map;
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 1acf9a65c24..355a8d5d6e7 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -116,9 +116,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return handle(obj, isolate());
}
- Handle<BaselineData> NewBaselineData(Handle<Code> code,
- Handle<HeapObject> function_data);
-
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -884,11 +881,15 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
CodeBuilder& set_deoptimization_data(
Handle<DeoptimizationData> deopt_data) {
+ DCHECK_NE(kind_, CodeKind::BASELINE);
DCHECK(!deopt_data.is_null());
deoptimization_data_ = deopt_data;
return *this;
}
+ inline CodeBuilder& set_interpreter_data(
+ Handle<HeapObject> interpreter_data);
+
CodeBuilder& set_is_turbofanned() {
DCHECK(!CodeKindIsUnoptimizedJSFunction(kind_));
is_turbofanned_ = true;
@@ -943,6 +944,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ByteArray> position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
+ Handle<HeapObject> interpreter_data_;
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index a780ac01b01..8ddd177c6bd 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -257,14 +257,14 @@ void GCTracer::Start(GarbageCollector collector,
previous_ = current_;
switch (collector) {
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
break;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
current_ =
Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
break;
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
if (heap_->incremental_marking()->WasActivated()) {
current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
collector_reason);
@@ -344,10 +344,11 @@ void GCTracer::Stop(GarbageCollector collector) {
}
DCHECK_LE(0, start_counter_);
- DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
- (collector == MINOR_MARK_COMPACTOR &&
+ DCHECK((collector == GarbageCollector::SCAVENGER &&
+ current_.type == Event::SCAVENGER) ||
+ (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
- (collector == MARK_COMPACTOR &&
+ (collector == GarbageCollector::MARK_COMPACTOR &&
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index d5bdf513a64..6daeadc94b9 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -7,6 +7,7 @@
#include "include/v8-metrics.h"
#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
@@ -31,23 +32,24 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
#define TRACE_GC_CATEGORIES \
"devtools.timeline," TRACE_DISABLED_BY_DEFAULT("v8.gc")
-#define TRACE_GC(tracer, scope_id) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, \
- ThreadKind::kMain); \
- TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id))
-
-#define TRACE_GC1(tracer, scope_id, thread_kind) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, thread_kind); \
- TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id))
-
-#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, thread_kind); \
- CollectionEpoch gc_tracer_epoch = tracer->CurrentEpoch(scope_id); \
- TRACE_EVENT1(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id), \
- "epoch", gc_tracer_epoch)
+#define TRACE_GC(tracer, scope_id) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), ThreadKind::kMain); \
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))
+
+#define TRACE_GC1(tracer, scope_id, thread_kind) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))
+
+#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
+ TRACE_EVENT1(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)), \
+ "epoch", tracer->CurrentEpoch(scope_id))
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
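The rewritten macros above no longer declare a fixed gc_tracer_scope_id variable; each expansion gets a distinct name via UNIQUE_IDENTIFIER from src/base/macros.h. A hypothetical re-implementation of that trick using the compiler-provided __COUNTER__, just to show why two such expansions can now live in one scope without shadowing:

#define CONCAT_IMPL(a, b) a##b
#define CONCAT(a, b) CONCAT_IMPL(a, b)
#define MY_UNIQUE_IDENTIFIER(base) CONCAT(base, __COUNTER__)

struct ScopeTimer {
  explicit ScopeTimer(const char*) {}
};

#define TRACE_SCOPE(name) ScopeTimer MY_UNIQUE_IDENTIFIER(trace_scope_)(name)

void f() {
  TRACE_SCOPE("outer");
  TRACE_SCOPE("inner");  // would have shadowed a fixed name; now it is distinct
}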
@@ -59,11 +61,11 @@ class V8_EXPORT_PRIVATE GCTracer {
struct IncrementalMarkingInfos {
IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
- void Update(double duration) {
+ void Update(double delta) {
steps++;
- this->duration += duration;
- if (duration > longest_step) {
- longest_step = duration;
+ duration += delta;
+ if (delta > longest_step) {
+ longest_step = delta;
}
}
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 7c8a2f54d6b..9b998ea6af2 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -43,6 +43,7 @@
#include "src/objects/scope-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/visitors-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/strings/string-hasher.h"
#include "src/utils/ostreams.h"
@@ -769,6 +770,9 @@ bool Heap::HasDirtyJSFinalizationRegistries() {
return !dirty_js_finalization_registries_list().IsUndefined(isolate());
}
+VerifyPointersVisitor::VerifyPointersVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
+
AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
heap_->always_allocate_scope_count_++;
}
@@ -784,12 +788,12 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
- heap_->code_space()->SetReadAndWritable();
+ heap_->code_space()->SetCodeModificationPermissions();
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
+ page->SetCodeModificationPermissions();
page = page->next_page();
}
}
@@ -847,7 +851,7 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
(chunk_->owner()->identity() == CODE_LO_SPACE));
- MemoryChunk::cast(chunk_)->SetReadAndWritable();
+ MemoryChunk::cast(chunk_)->SetCodeModificationPermissions();
}
}
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 982b80bb89f..4a57a1678eb 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -31,7 +31,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
@@ -461,18 +461,18 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
if (space != NEW_SPACE && space != NEW_LO_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
*reason = "GC in old space forced by flags";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (incremental_marking()->NeedsFinalization() &&
AllocationLimitOvershotByLargeMargin()) {
*reason = "Incremental marking needs finalization";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (!CanPromoteYoungAndExpandOldGeneration(0)) {
@@ -480,7 +480,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "scavenge might not succeed";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
// Default
@@ -653,8 +653,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
// clang-format off
#define DICT(s) "{" << s << "}"
#define LIST(s) "[" << s << "]"
-#define ESCAPE(s) "\"" << s << "\""
-#define MEMBER(s) ESCAPE(s) << ":"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
auto SpaceStatistics = [this](int space_index) {
HeapSpaceStatistics space_stats;
@@ -663,7 +663,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(BaseSpace::GetSpaceName(
+ << QUOTE(BaseSpace::GetSpaceName(
static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
@@ -674,7 +674,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
};
stream << DICT(
- MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << ","
+ MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
MEMBER("id") << gc_count() << ","
MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
MEMBER("total_heap_size") << stats.total_heap_size() << ","
@@ -699,7 +699,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
#undef DICT
#undef LIST
-#undef ESCAPE
+#undef QUOTE
#undef MEMBER
// clang-format on
}
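The ESCAPE-to-QUOTE rename above touches the stream macros that build the JSON heap statistics line. A hypothetical standalone illustration of how they compose (the DICT/QUOTE/MEMBER definitions match the diff; main() and the sample values are mine):

#include <iostream>
#include <sstream>

#define DICT(s) "{" << s << "}"
#define QUOTE(s) "\"" << s << "\""
#define MEMBER(s) QUOTE(s) << ":"

int main() {
  std::stringstream stream;
  stream << DICT(MEMBER("name") << QUOTE("old_space") << ","
                 MEMBER("size") << 4096);
  std::cout << stream.str() << "\n";  // prints {"name":"old_space","size":4096}
}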
@@ -792,16 +792,16 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
}
int distance = static_cast<int>(retaining_path.size());
for (auto node : retaining_path) {
- HeapObject object = node.first;
- bool ephemeron = node.second;
+ HeapObject node_object = node.first;
+ bool node_ephemeron = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Distance from root %d%s: ", distance,
- ephemeron ? " (ephemeron)" : "");
- object.ShortPrint();
+ node_ephemeron ? " (ephemeron)" : "");
+ node_object.ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
- object.Print();
+ node_object.Print();
PrintF("\n");
#endif
--distance;
@@ -978,10 +978,11 @@ size_t Heap::UsedGlobalHandlesSize() {
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
+ PtrComprCageBase cage_base(isolate());
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
- MapWord map_word = site_and_count.first.map_word(kRelaxedLoad);
+ MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
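// The lines above re-read the map word through the renamed `site` variable, pass an
// explicit cage base, and then check IsForwardingAddress(): the classic copying-GC
// question "has this object already been moved?". A toy standalone version of that
// forwarding-pointer pattern (header layout is made up for illustration):
#include <cstdint>

struct ToyObject {
  // Low bit set => the header holds a forwarding pointer instead of a map word.
  uintptr_t header;

  bool IsForwarded() const { return header & 1; }
  ToyObject* ForwardingAddress() const {
    return reinterpret_cast<ToyObject*>(header & ~uintptr_t{1});
  }
};

inline ToyObject* Resolve(ToyObject* obj) {
  // Follow the forwarding pointer if the object was already evacuated.
  return obj->IsForwarded() ? obj->ForwardingAddress() : obj;
}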
@@ -1003,7 +1004,6 @@ void Heap::MergeAllocationSitePretenuringFeedback(
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@@ -1018,7 +1018,6 @@ void Heap::AddAllocationObserversToAllSpaces(
void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@@ -1181,8 +1180,8 @@ void Heap::ProcessPretenuringFeedback() {
// Step 2: Pretenure allocation sites for manual requests.
if (allocation_sites_to_pretenure_) {
while (!allocation_sites_to_pretenure_->empty()) {
- auto site = allocation_sites_to_pretenure_->Pop();
- if (PretenureAllocationSiteManually(isolate_, site)) {
+ auto pretenure_site = allocation_sites_to_pretenure_->Pop();
+ if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
trigger_deoptimization = true;
}
}
@@ -1254,7 +1253,7 @@ void Heap::DeoptMarkedAllocationSites() {
}
void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
memory_pressure_level_.store(MemoryPressureLevel::kNone,
std::memory_order_relaxed);
}
@@ -1686,6 +1685,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
current_gc_flags_ & kForcedGC ||
force_gc_on_next_allocation_;
+ is_current_gc_for_heap_profiler_ =
+ gc_reason == GarbageCollectionReason::kHeapProfiler;
if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
DevToolsTraceEventScope devtools_trace_event_scope(
@@ -1728,7 +1729,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
size_t committed_memory_before = 0;
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
if (cpp_heap()) {
// CppHeap needs a stack marker at the top of all entry points to allow
@@ -1765,8 +1766,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
PROFILE(isolate_, CodeMovingGCEvent());
}
- GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact
- : kGCTypeScavenge;
+ GCType gc_type = collector == GarbageCollector::MARK_COMPACTOR
+ ? kGCTypeMarkSweepCompact
+ : kGCTypeScavenge;
{
GCCallbacksScope scope(this);
// Temporary override any embedder stack state as callbacks may create
@@ -1778,7 +1780,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
+ VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
@@ -1790,10 +1792,11 @@ bool Heap::CollectGarbage(AllocationSpace space,
freed_global_handles +=
PerformGarbageCollection(collector, gc_callback_flags);
}
- // Clear is_current_gc_forced now that the current GC is complete. Do this
- // before GarbageCollectionEpilogue() since that could trigger another
- // unforced GC.
+ // Clear flags describing the current GC now that the current GC is
+ // complete. Do this before GarbageCollectionEpilogue() since that could
+ // trigger another unforced GC.
is_current_gc_forced_ = false;
+ is_current_gc_for_heap_profiler_ = false;
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
@@ -1814,22 +1817,24 @@ bool Heap::CollectGarbage(AllocationSpace space,
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
+ VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
}
- if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
+ if (collector == GarbageCollector::MARK_COMPACTOR ||
+ collector == GarbageCollector::SCAVENGER) {
tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
GarbageCollectionEpilogue();
- if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
+ if (collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
}
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
// Calculate used memory first, then committed memory. Following code
// assumes that committed >= used, which might not hold when this is
// calculated in the wrong order and background threads allocate
@@ -1858,7 +1863,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
tracer()->Stop(collector);
}
- if (collector == MARK_COMPACTOR &&
+ if (collector == GarbageCollector::MARK_COMPACTOR &&
(gc_callback_flags & (kGCCallbackFlagForced |
kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
isolate()->CountUsage(v8::Isolate::kForcedGC);
@@ -1929,14 +1934,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
void Heap::CompleteSweepingFull() {
- TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
- array_buffer_sweeper()->EnsureFinished();
- }
-
+ array_buffer_sweeper()->EnsureFinished();
mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(!mark_compact_collector()->sweeping_in_progress());
}
@@ -2157,11 +2155,11 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
namespace {
GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
switch (collector) {
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR;
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
return GCTracer::Scope::ScopeId::SCAVENGER;
}
UNREACHABLE();
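// Throughout this file MARK_COMPACTOR et al. now need the GarbageCollector:: prefix,
// i.e. the enum became a scoped `enum class`: its enumerators no longer leak into the
// enclosing scope and no longer convert implicitly to int. A minimal sketch of the
// pattern, mirroring the switch style above (not the real V8 declaration):
enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };

const char* NameOf(GarbageCollector collector) {
  switch (collector) {
    case GarbageCollector::SCAVENGER:            return "Scavenger";
    case GarbageCollector::MARK_COMPACTOR:       return "Mark-Compact";
    case GarbageCollector::MINOR_MARK_COMPACTOR: return "Minor Mark-Compact";
  }
  return "Unknown collector";  // unreachable if the switch stays exhaustive
}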
@@ -2208,13 +2206,13 @@ size_t Heap::PerformGarbageCollection(
NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
switch (collector) {
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
MarkCompact();
break;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
Scavenge();
break;
}
@@ -2224,14 +2222,14 @@ size_t Heap::PerformGarbageCollection(
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
- if (collector != MARK_COMPACTOR) {
+ if (collector != GarbageCollector::MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
start_young_generation_size - SurvivedYoungObjectSize());
}
- if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
+ if (!fast_promotion_mode_ || collector == GarbageCollector::MARK_COMPACTOR) {
ComputeFastPromotionMode();
}
@@ -2252,7 +2250,7 @@ size_t Heap::PerformGarbageCollection(
isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
}
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
// TraceEpilogue may trigger operations that invalidate global handles. It
// has to be called *after* all other operations that potentially touch and
@@ -2291,7 +2289,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
base::MutexGuard guard(isolate()->client_isolate_mutex());
const char* collector_reason = nullptr;
- GarbageCollector collector = MARK_COMPACTOR;
+ GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
tracer()->Start(collector, gc_reason, collector_reason);
@@ -2309,7 +2307,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
client_heap->shared_map_allocator_->FreeLinearAllocationArea();
});
- PerformGarbageCollection(MARK_COMPACTOR);
+ PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
isolate()->IterateClientIsolates([initiator](Isolate* client) {
GlobalSafepoint::StopMainThread stop_main_thread =
@@ -2366,7 +2364,7 @@ void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
void Heap::UpdateEpochFull() { epoch_full_ = next_epoch(); }
void Heap::RecomputeLimits(GarbageCollector collector) {
- if (!((collector == MARK_COMPACTOR) ||
+ if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
(HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_))) {
return;
@@ -2398,7 +2396,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
size_t new_space_capacity = NewSpaceCapacity();
HeapGrowingMode mode = CurrentHeapGrowingMode();
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
external_memory_.ResetAfterGC();
set_old_generation_allocation_limit(
@@ -2682,7 +2680,7 @@ void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
guard.emplace(&unprotected_memory_chunks_mutex_);
}
if (unprotected_memory_chunks_.insert(chunk).second) {
- chunk->SetReadAndWritable();
+ chunk->SetCodeModificationPermissions();
}
}
}
@@ -2734,8 +2732,9 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
+ PtrComprCageBase cage_base(heap->isolate());
HeapObject obj = HeapObject::cast(*p);
- MapWord first_word = obj.map_word(kRelaxedLoad);
+ MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
String new_string;
@@ -2743,9 +2742,9 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
String string = String::cast(obj);
- if (!string.IsExternalString()) {
+ if (!string.IsExternalString(cage_base)) {
// Original external string has been internalized.
- DCHECK(string.IsThinString());
+ DCHECK(string.IsThinString(cage_base));
return String();
}
heap->FinalizeExternalString(string);
@@ -2757,10 +2756,10 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
}
// String is still reachable.
- if (new_string.IsThinString()) {
+ if (new_string.IsThinString(cage_base)) {
// Filtering Thin strings out of the external string table.
return String();
- } else if (new_string.IsExternalString()) {
+ } else if (new_string.IsExternalString(cage_base)) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
@@ -2769,7 +2768,7 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
}
// Internalization can replace external strings with non-external strings.
- return new_string.IsExternalString() ? new_string : String();
+ return new_string.IsExternalString(cage_base) ? new_string : String();
}
void Heap::ExternalStringTable::VerifyYoung() {
@@ -3476,15 +3475,6 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
elements_to_trim * kTaggedSize);
}
-void Heap::UndoLastAllocationAt(Address addr, int size) {
- DCHECK_LE(0, size);
- if (size == 0) return;
- if (code_space_->TryFreeLast(addr, size)) {
- return;
- }
- CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
-}
-
template <typename T>
void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
@@ -3865,14 +3855,14 @@ class SlotCollectingVisitor final : public ObjectVisitor {
MaybeObjectSlot slot(int i) { return slots_[i]; }
#if V8_EXTERNAL_CODE_SPACE
- ObjectSlot code_slot(int i) { return code_slots_[i]; }
+ CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
#endif
private:
std::vector<MaybeObjectSlot> slots_;
#if V8_EXTERNAL_CODE_SPACE
- std::vector<ObjectSlot> code_slots_;
+ std::vector<CodeObjectSlot> code_slots_;
#endif
};
@@ -4159,11 +4149,13 @@ void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+  // The ArrayBufferSweeper manages all counters and updates the Heap counters.
+  // The ArrayBufferSweeper manages all counters and updates the Heap counters.
array_buffer_sweeper_->Append(object, extension);
}
void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+  // The ArrayBufferSweeper manages all counters and updates the Heap counters.
return array_buffer_sweeper_->Detach(object, extension);
}
@@ -4457,11 +4449,11 @@ void Heap::VerifyReadOnlyHeap() {
read_only_space_->Verify(isolate());
}
-class SlotVerifyingVisitor : public ObjectVisitor {
+class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
public:
- SlotVerifyingVisitor(std::set<Address>* untyped,
+ SlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
std::set<std::pair<SlotType, Address>>* typed)
- : untyped_(untyped), typed_(typed) {}
+ : ObjectVisitorWithCageBases(isolate), untyped_(untyped), typed_(typed) {}
virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
@@ -4469,7 +4461,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
ObjectSlot end) override {
#ifdef DEBUG
for (ObjectSlot slot = start; slot < end; ++slot) {
- DCHECK(!MapWord::IsPacked((*slot).ptr()) || !HasWeakHeapObjectTag(*slot));
+ Object obj = slot.load(cage_base());
+ CHECK(!MapWord::IsPacked(obj.ptr()) || !HasWeakHeapObjectTag(obj));
}
#endif // DEBUG
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -4478,7 +4471,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot slot = start; slot < end; ++slot) {
- if (ShouldHaveBeenRecorded(host, *slot)) {
+ if (ShouldHaveBeenRecorded(host, slot.load(cage_base()))) {
CHECK_GT(untyped_->count(slot.address()), 0);
}
}
@@ -4486,11 +4479,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
if (ShouldHaveBeenRecorded(
- host, MaybeObject::FromObject(slot.load(code_cage_base)))) {
+ host, MaybeObject::FromObject(slot.load(code_cage_base())))) {
CHECK_GT(untyped_->count(slot.address()), 0);
}
}
@@ -4506,7 +4496,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- Object target = rinfo->target_object();
+ Object target = rinfo->target_object_no_host(cage_base());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
@@ -4535,10 +4525,10 @@ class SlotVerifyingVisitor : public ObjectVisitor {
class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
public:
- OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
+ OldToNewSlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
std::set<std::pair<SlotType, Address>>* typed,
EphemeronRememberedSet* ephemeron_remembered_set)
- : SlotVerifyingVisitor(untyped, typed),
+ : SlotVerifyingVisitor(isolate, untyped, typed),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
@@ -4618,7 +4608,8 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
std::set<std::pair<SlotType, Address>> typed_old_to_new;
if (!InYoungGeneration(object)) {
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
- OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
+ OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
+ &typed_old_to_new,
&this->ephemeron_remembered_set_);
object.IterateBody(&visitor);
}
@@ -4669,21 +4660,29 @@ void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
Code Heap::builtin(Builtin builtin) {
DCHECK(Builtins::IsBuiltinId(builtin));
return Code::cast(
- Object(isolate()->builtins_table()[static_cast<int>(builtin)]));
+ Object(isolate()->builtin_table()[static_cast<int>(builtin)]));
}
Address Heap::builtin_address(Builtin builtin) {
+ const int index = Builtins::ToInt(builtin);
+ DCHECK(Builtins::IsBuiltinId(builtin) || index == Builtins::kBuiltinCount);
+ // Note: Must return an address within the full builtin_table for
+ // IterateBuiltins to work.
+ return reinterpret_cast<Address>(&isolate()->builtin_table()[index]);
+}
+
+Address Heap::builtin_tier0_address(Builtin builtin) {
const int index = static_cast<int>(builtin);
DCHECK(Builtins::IsBuiltinId(builtin) || index == Builtins::kBuiltinCount);
- return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
+ return reinterpret_cast<Address>(
+ &isolate()->isolate_data()->builtin_tier0_table()[index]);
}
void Heap::set_builtin(Builtin builtin, Code code) {
DCHECK(Builtins::IsBuiltinId(builtin));
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
- // The given builtin may be completely uninitialized thus we cannot check its
- // type here.
- isolate()->builtins_table()[static_cast<int>(builtin)] = code.ptr();
+  // The given builtin may be uninitialized, so we cannot check its type here.
+ isolate()->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
}
void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
@@ -4914,6 +4913,12 @@ void Heap::IterateBuiltins(RootVisitor* v) {
FullObjectSlot(builtin_address(builtin)));
}
+ for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
+ ++builtin) {
+ v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
+ FullObjectSlot(builtin_tier0_address(builtin)));
+ }
+
// The entry table doesn't need to be updated since all builtins are embedded.
STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
}
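// The new loop above walks a contiguous range of Builtin enumerators with ++ and <=.
// For a scoped enum that only works if operator++ is provided. A standalone sketch of
// that helper over a hypothetical three-entry enum (not V8's real Builtin list):
enum class Builtin : int { kFirst = 0, kAbort = 0, kAdd, kLastTier0 = kAdd, kCount };

inline Builtin& operator++(Builtin& b) {
  b = static_cast<Builtin>(static_cast<int>(b) + 1);
  return b;
}

inline int CountTier0() {
  int n = 0;
  for (Builtin b = Builtin::kFirst; b <= Builtin::kLastTier0; ++b) ++n;
  return n;  // 2 with the toy list above
}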
@@ -6366,10 +6371,10 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
static constexpr intptr_t kLogicalChunkAlignmentMask =
kLogicalChunkAlignment - 1;
- class MarkingVisitor : public ObjectVisitor, public RootVisitor {
+ class MarkingVisitor : public ObjectVisitorWithCageBases, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
- : filter_(filter) {}
+ : ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
MarkHeapObject(Map::unchecked_cast(object.map()));
@@ -6386,9 +6391,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base));
+ HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base()));
MarkHeapObject(code);
}
@@ -6397,7 +6400,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkHeapObject(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- MarkHeapObject(rinfo->target_object());
+ MarkHeapObject(rinfo->target_object_no_host(cage_base()));
}
void VisitRootPointers(Root root, const char* description,
@@ -6426,9 +6429,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
template <typename TSlot>
V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
// Treat weak references as strong.
- Isolate* isolate = filter_->heap_->isolate();
for (TSlot p = start; p < end; ++p) {
- typename TSlot::TObject object = p.load(isolate);
+ typename TSlot::TObject object = p.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
MarkHeapObject(heap_object);
@@ -6867,9 +6869,7 @@ void VerifyPointersVisitor::VisitPointers(HeapObject host,
void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
CodeObjectSlot slot) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyCodeObjectImpl(code);
@@ -6894,22 +6894,20 @@ void VerifyPointersVisitor::VisitRootPointers(Root root,
void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
CHECK(IsValidHeapObject(heap_, heap_object));
- CHECK(heap_object.map().IsMap());
+ CHECK(heap_object.map(cage_base()).IsMap());
}
void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
CHECK(IsValidCodeObject(heap_, heap_object));
- PtrComprCageBase cage_base(heap_->isolate());
- CHECK(heap_object.map(cage_base).IsMap(cage_base));
- CHECK(heap_object.map(cage_base).instance_type() == CODE_TYPE);
+ CHECK(heap_object.map(cage_base()).IsMap());
+ CHECK(heap_object.map(cage_base()).instance_type() == CODE_TYPE);
}
template <typename TSlot>
void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
- Isolate* isolate = heap_->isolate();
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.load(isolate);
+ typename TSlot::TObject object = slot.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -6937,7 +6935,7 @@ void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
}
void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
@@ -6999,9 +6997,17 @@ void Heap::CreateObjectStats() {
}
Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
- MapWord map_word = object.map_word(kRelaxedLoad);
- return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
- : map_word.ToMap();
+ PtrComprCageBase cage_base(isolate());
+ MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+#if V8_EXTERNAL_CODE_SPACE
+ PtrComprCageBase code_cage_base(isolate()->code_cage_base());
+#else
+ PtrComprCageBase code_cage_base = cage_base;
+#endif
+ return map_word.ToForwardingAddress(code_cage_base).map(cage_base);
+ }
+ return map_word.ToMap();
}
Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
@@ -7091,11 +7097,18 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
- int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
- InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
- auto it =
- ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
- it.first->second.insert(entry.as_int());
+ if (FLAG_minor_mc) {
+ // Minor MC lacks support for specialized generational ephemeron barriers.
+ // The regular write barrier works as well but keeps more memory alive.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(table);
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
+ } else {
+ int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
+ InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
+ auto it =
+ ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
+ it.first->second.insert(entry.as_int());
+ }
}
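// The else-branch above relies on unordered_map::insert() returning a
// pair<iterator, bool> and leaving an existing entry untouched, so the per-table set
// of slot indices accumulates across calls. The same idiom in isolation, with
// std::string standing in for the EphemeronHashTable key:
#include <string>
#include <unordered_map>
#include <unordered_set>

std::unordered_map<std::string, std::unordered_set<int>> remembered;

void RecordEntry(const std::string& table, int entry_index) {
  // insert() is a no-op if `table` is already present; either way it.first points at
  // the (possibly pre-existing) entry whose set we extend.
  auto it = remembered.insert({table, std::unordered_set<int>()});
  it.first->second.insert(entry_index);
}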
void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
@@ -7171,7 +7184,7 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
if (incremental_marking()->IsMarking()) {
mode |= kDoMarking;
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
mode |= kDoEvacuationSlotRecording;
}
}
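// WriteBarrierForRange() above composes its behaviour from bit flags (kDoMarking,
// kDoEvacuationSlotRecording) so the per-slot loop only pays for the checks that are
// enabled. A compact sketch of that flag-composition style; the flag names here are
// invented for illustration:
#include <cstdint>

enum ModeFlag : uint8_t {
  kDoNothing = 0,
  kDoGenerationalBarrier = 1 << 0,
  kDoMarking = 1 << 1,
  kDoEvacuationSlotRecording = 1 << 2,
};

inline uint8_t SelectMode(bool in_young_gen, bool marking, bool record_slots) {
  uint8_t mode = kDoNothing;
  if (in_young_gen) mode |= kDoGenerationalBarrier;
  if (marking) {
    mode |= kDoMarking;
    if (record_slots) mode |= kDoEvacuationSlotRecording;
  }
  return mode;
}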
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 61dea819f03..74aac82907b 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -15,8 +15,10 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "include/v8-callbacks.h"
+#include "include/v8-embedder-heap.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/base/atomic-utils.h"
#include "src/base/enum-set.h"
#include "src/base/platform/condition-variable.h"
@@ -472,24 +474,26 @@ class Heap {
}
static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
- return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
+ return collector == GarbageCollector::SCAVENGER ||
+ collector == GarbageCollector::MINOR_MARK_COMPACTOR;
}
static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
- return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+ return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
+ : GarbageCollector::SCAVENGER;
#else
- return SCAVENGER;
+ return GarbageCollector::SCAVENGER;
#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
switch (collector) {
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
return "Scavenger";
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
return "Mark-Compact";
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
return "Minor Mark-Compact";
}
return "Unknown collector";
@@ -577,8 +581,6 @@ class Heap {
int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
- void UndoLastAllocationAt(Address addr, int size);
-
// Converts the given boolean condition to JavaScript boolean value.
inline Oddball ToBoolean(bool condition);
@@ -1053,6 +1055,7 @@ class Heap {
V8_EXPORT_PRIVATE Code builtin(Builtin builtin);
Address builtin_address(Builtin builtin);
+ Address builtin_tier0_address(Builtin builtin);
void set_builtin(Builtin builtin, Code code);
// ===========================================================================
@@ -1462,6 +1465,12 @@ class Heap {
bool is_current_gc_forced() const { return is_current_gc_forced_; }
+ // Returns whether the currently in-progress GC should avoid increasing the
+ // ages on any objects that live for a set number of collections.
+ bool ShouldCurrentGCKeepAgesUnchanged() const {
+ return is_current_gc_forced_ || is_current_gc_for_heap_profiler_;
+ }
+
// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
@@ -2449,6 +2458,7 @@ class Heap {
std::unique_ptr<GlobalSafepoint> safepoint_;
bool is_current_gc_forced_ = false;
+ bool is_current_gc_for_heap_profiler_ = false;
ExternalStringTable external_string_table_;
@@ -2655,9 +2665,10 @@ class V8_NODISCARD CodePageMemoryModificationScope {
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
+class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
- explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
+ V8_INLINE explicit VerifyPointersVisitor(Heap* heap);
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override;
void VisitPointers(HeapObject host, MaybeObjectSlot start,
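// VerifyPointersVisitor's constructor above is now only declared in the header
// (V8_INLINE explicit VerifyPointersVisitor(Heap*);) so heap.h no longer needs the
// definitions the constructor body depends on. The split in miniature, as a single
// translation unit; class and member names are illustrative:
class Heap;  // a forward declaration is enough for the "header" half

class VerifyVisitor {
 public:
  inline explicit VerifyVisitor(Heap* heap);  // declared here, defined below

 private:
  Heap* heap_;
};

// This definition would normally live in a *-inl.h that includes the heavy headers.
inline VerifyVisitor::VerifyVisitor(Heap* heap) : heap_(heap) {}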
diff --git a/chromium/v8/src/heap/large-spaces.cc b/chromium/v8/src/heap/large-spaces.cc
index 1736fee60d1..2cac8dc0a56 100644
--- a/chromium/v8/src/heap/large-spaces.cc
+++ b/chromium/v8/src/heap/large-spaces.cc
@@ -230,7 +230,7 @@ void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- chunk->ResetProgressBar();
+ chunk->ProgressBar().ResetIfEnabled();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
@@ -354,6 +354,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
@@ -364,23 +365,26 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
- if (!(object.IsAbstractCode() || object.IsSeqString() ||
- object.IsExternalString() || object.IsThinString() ||
- object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsWeakFixedArray() || object.IsWeakArrayList() ||
- object.IsPropertyArray() || object.IsByteArray() ||
- object.IsFeedbackVector() || object.IsBigInt() ||
- object.IsFreeSpace() || object.IsFeedbackMetadata() ||
- object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
- object.IsPreparseData()) &&
+ if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
+ object.IsExternalString(cage_base) ||
+ object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
+ object.IsFixedDoubleArray(cage_base) ||
+ object.IsWeakFixedArray(cage_base) ||
+ object.IsWeakArrayList(cage_base) ||
+ object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
+ object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
+ object.IsFreeSpace(cage_base) ||
+ object.IsFeedbackMetadata(cage_base) || object.IsContext(cage_base) ||
+ object.IsUncompiledDataWithoutPreparseData(cage_base) ||
+ object.IsPreparseData(cage_base)) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
- object.map().instance_type());
+ object.map(cage_base).instance_type());
}
// The object itself should look OK.
@@ -391,27 +395,27 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
}
// Byte arrays and strings don't have interior pointers.
- if (object.IsAbstractCode()) {
+ if (object.IsAbstractCode(cage_base)) {
VerifyPointersVisitor code_visitor(heap());
object.IterateBody(map, object.Size(), &code_visitor);
- } else if (object.IsFixedArray()) {
+ } else if (object.IsFixedArray(cage_base)) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object element = array.get(j);
if (element.IsHeapObject()) {
HeapObject element_object = HeapObject::cast(element);
CHECK(IsValidHeapObject(heap(), element_object));
- CHECK(element_object.map().IsMap());
+ CHECK(element_object.map(cage_base).IsMap(cage_base));
}
}
- } else if (object.IsPropertyArray()) {
+ } else if (object.IsPropertyArray(cage_base)) {
PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object property = array.get(j);
if (property.IsHeapObject()) {
HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
- CHECK(property_object.map().IsMap());
+ CHECK(property_object.map(cage_base).IsMap(cage_base));
}
}
}
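// Verify() above now computes one PtrComprCageBase from the isolate and threads it
// through every map()/Is*() call instead of letting each call re-derive the base from
// the object's address. A sketch of why hoisting the base helps, with made-up
// compression arithmetic (V8's real scheme differs):
#include <cstdint>

struct CageBase { uintptr_t base; };

// Full pointer = cage base + 32-bit compressed offset.
inline uintptr_t Decompress(CageBase cage, uint32_t compressed) {
  return cage.base + compressed;
}

inline uint64_t SumDecompressed(CageBase cage, const uint32_t* slots, int n) {
  uint64_t sum = 0;
  // The cage base is loop-invariant, so computing it once outside the loop saves
  // re-deriving it for every slot.
  for (int i = 0; i < n; ++i) sum += Decompress(cage, slots[i]);
  return sum;
}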
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 2210c73958b..a623360197c 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -68,7 +68,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
RecordSlot(source_page, slot, target);
}
}
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
- if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
+ if (target_page->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
@@ -127,16 +127,6 @@ void MainMarkingVisitor<MarkingState>::RecordRelocSlot(Code host,
MarkCompactCollector::RecordRelocSlot(host, rinfo, target);
}
-template <typename MarkingState>
-void MainMarkingVisitor<MarkingState>::MarkDescriptorArrayFromWriteBarrier(
- DescriptorArray descriptors, int number_of_own_descriptors) {
- // This is necessary because the Scavenger records slots only for the
- // promoted black objects and the marking visitor of DescriptorArray skips
- // the descriptors marked by the visitor.VisitDescriptors() below.
- this->MarkDescriptorArrayBlack(descriptors);
- this->VisitDescriptors(descriptors, number_of_own_descriptors);
-}
-
template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(const MemoryChunk* chunk,
Bitmap* bitmap, Address start)
@@ -173,6 +163,7 @@ operator++(int) {
template <LiveObjectIterationMode mode>
void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
+ PtrComprCageBase cage_base(chunk_->heap()->isolate());
while (!it_.Done()) {
HeapObject object;
int size = 0;
@@ -208,10 +199,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = black_object.map(kAcquireLoad);
- CHECK(map_object.IsMap());
+ Object map_object = black_object.map(cage_base, kAcquireLoad);
+ CHECK(map_object.IsMap(cage_base));
map = Map::cast(map_object);
- DCHECK(map.IsMap());
+ DCHECK(map.IsMap(cage_base));
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
@@ -240,10 +231,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
object = HeapObject::FromAddress(addr);
- Object map_object = object.map(kAcquireLoad);
- CHECK(map_object.IsMap());
+ Object map_object = object.map(cage_base, kAcquireLoad);
+ CHECK(map_object.IsMap(cage_base));
map = Map::cast(map_object);
- DCHECK(map.IsMap());
+ DCHECK(map.IsMap(cage_base));
size = object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
}
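// AdvanceToNextValidObject() above reads the map word with kAcquireLoad: the map is
// published by another thread, so the reader needs acquire semantics to also see the
// object body written before the map was stored with release semantics. A standalone
// version of that publish/read handshake:
#include <atomic>
#include <cstdint>

struct ToyHeader {
  std::atomic<uintptr_t> map_word{0};
  int payload = 0;
};

inline void Publish(ToyHeader& obj, uintptr_t map, int payload) {
  obj.payload = payload;
  obj.map_word.store(map, std::memory_order_release);  // publish last
}

inline bool TryRead(const ToyHeader& obj, int* out) {
  if (obj.map_word.load(std::memory_order_acquire) == 0) return false;
  *out = obj.payload;  // safe: the acquire pairs with the release above
  return true;
}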
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 0fffb4ea458..3873374b0f9 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -52,6 +52,7 @@
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"
@@ -75,12 +76,13 @@ STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
#ifdef VERIFY_HEAP
namespace {
-class MarkingVerifier : public ObjectVisitor, public RootVisitor {
+class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
public:
virtual void Run() = 0;
protected:
- explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
+ explicit MarkingVerifier(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
@@ -235,10 +237,7 @@ class FullMarkingVerifier : public MarkingVerifier {
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
@@ -256,9 +255,9 @@ class FullMarkingVerifier : public MarkingVerifier {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- if (!host.IsWeakObject(rinfo->target_object())) {
- HeapObject object = rinfo->target_object();
- VerifyHeapObjectImpl(object);
+ HeapObject target_object = rinfo->target_object_no_host(cage_base());
+ if (!host.IsWeakObject(target_object)) {
+ VerifyHeapObjectImpl(target_object);
}
}
@@ -273,10 +272,8 @@ class FullMarkingVerifier : public MarkingVerifier {
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.load(cage_base);
+ typename TSlot::TObject object = slot.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -287,7 +284,8 @@ class FullMarkingVerifier : public MarkingVerifier {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
+class EvacuationVerifier : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
virtual void Run() = 0;
@@ -314,7 +312,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
protected:
- explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
+ explicit EvacuationVerifier(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
inline Heap* heap() { return heap_; }
@@ -396,10 +395,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base);
+ typename TSlot::TObject object = current.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -415,10 +412,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
@@ -429,7 +423,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -565,7 +559,7 @@ void MarkCompactCollector::StartMarking() {
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), weak_objects(), heap_,
epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
- heap_->is_current_gc_forced());
+ heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -646,6 +640,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper()->sweeping_in_progress()) return;
+ TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
+
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
@@ -1016,7 +1013,8 @@ void MarkCompactCollector::Finish() {
void MarkCompactCollector::SweepArrayBufferExtensions() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
- heap_->array_buffer_sweeper()->RequestSweepFull();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kFull);
}
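// RequestSweepFull() above became RequestSweep(SweepingType::kFull): one entry point
// parameterised by an enum instead of one method per mode. A minimal sketch of that
// API shape; the class and member names are illustrative, not the real sweeper:
class ArrayBufferSweeperLike {
 public:
  enum class SweepingType { kYoung, kFull };

  void RequestSweep(SweepingType type) {
    // Shared setup lives here once; the enum selects how much work is requested.
    if (type == SweepingType::kFull) sweep_old_ = true;
    sweep_young_ = true;
  }

 private:
  bool sweep_young_ = false;
  bool sweep_old_ = false;
};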
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
@@ -1062,24 +1060,26 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
// keep alive its embedded pointers (which would otherwise be dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
- : public ObjectVisitor {
+ : public ObjectVisitorWithCageBases {
public:
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
- MarkObject(host, *p);
+ MarkObject(host, p.load(cage_base()));
}
- void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
+ void VisitMapPointer(HeapObject host) final {
+ MarkObject(host, host.map(cage_base()));
+ }
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
- DCHECK(!HasWeakHeapObjectTag(p.load(cage_base)));
- MarkObject(host, p.load(cage_base));
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ MarkObject(host, p.load(cage_base()));
}
}
@@ -1102,7 +1102,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkObject(host, target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- MarkObject(host, rinfo->target_object());
+ MarkObject(host, rinfo->target_object_no_host(cage_base()));
}
private:
@@ -1223,22 +1223,24 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class RecordMigratedSlotVisitor : public ObjectVisitor {
+class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
public:
explicit RecordMigratedSlotVisitor(
MarkCompactCollector* collector,
EphemeronRememberedSet* ephemeron_remembered_set)
- : collector_(collector),
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
- DCHECK(!HasWeakHeapObjectTag(*p));
- RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ RecordMigratedSlot(host, MaybeObject::FromObject(p.load(cage_base())),
+ p.address());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
- DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
- RecordMigratedSlot(host, *p, p.address());
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
+ RecordMigratedSlot(host, p.load(cage_base()), p.address());
}
inline void VisitPointers(HeapObject host, ObjectSlot start,
@@ -1261,10 +1263,8 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
// This code is similar to the implementation of VisitPointer() modulo
// new kind of slot.
- DCHECK(!HasWeakHeapObjectTag(*slot));
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object code = slot.load(code_cage_base);
+ DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
+ Object code = slot.load(code_cage_base());
RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
}
@@ -1298,7 +1298,8 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject object = HeapObject::cast(rinfo->target_object());
+ HeapObject object =
+ HeapObject::cast(rinfo->target_object_no_host(cage_base()));
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
@@ -1434,9 +1435,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
int size, HeapObject* target_object) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && AbortCompactionForTesting(object)) return false;
-#endif // VERIFY_HEAP
+#ifdef DEBUG
+ if (FLAG_stress_compaction && AbortCompactionForTesting(object))
+ return false;
+#endif // DEBUG
AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
AllocationResult allocation = local_allocator_->Allocate(
target_space, size, AllocationOrigin::kGC, alignment);
@@ -1463,7 +1465,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
migration_function_(this, dst, src, size, dest);
}
-#ifdef VERIFY_HEAP
+#ifdef DEBUG
bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
@@ -1480,7 +1482,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
return false;
}
-#endif // VERIFY_HEAP
+#endif // DEBUG
Heap* heap_;
EvacuationAllocator* local_allocator_;
@@ -1707,19 +1709,12 @@ void MarkCompactCollector::VisitObject(HeapObject obj) {
void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK(marking_state()->IsBlack(obj));
- DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
- MemoryChunk::HAS_PROGRESS_BAR),
- 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
+ DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
+ 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(), obj);
}
-void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
- DescriptorArray descriptors, int number_of_own_descriptors) {
- marking_visitor_->MarkDescriptorArrayFromWriteBarrier(
- descriptors, number_of_own_descriptors);
-}
-
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
@@ -2368,23 +2363,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
DCHECK(!shared_info.is_compiled());
}
-void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
- if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
-
- // Mark baseline data as live.
- non_atomic_marking_state()->WhiteToBlack(baseline_data);
-
- // Record object slots.
- DCHECK(
- non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
- ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
- RecordSlot(baseline_data, code, HeapObject::cast(*code));
-
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
- ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
- RecordSlot(baseline_data, data, HeapObject::cast(*data));
-}
-
void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
@@ -2393,10 +2371,12 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
- if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
- BaselineData baseline_data = flushing_candidate.baseline_data();
- if (non_atomic_marking_state()->IsBlackOrGrey(
- baseline_data.baseline_code())) {
+ if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
+ CodeT baseline_codet =
+ CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
// Currently baseline code holds bytecode array strongly and it is
// always ensured that bytecode is live if baseline code is live. Hence
// baseline code can safely load bytecode array without any additional
@@ -2404,19 +2384,23 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
// flush code if the bytecode is not live and also update baseline code
// to bailout if there is no bytecode.
DCHECK(is_bytecode_live);
- MarkBaselineDataAsLive(baseline_data);
+
+ // Regardless of whether the CodeT is a CodeDataContainer or the Code
+ // itself, if the Code is live then the CodeT has to be live and will
+ // have been marked via the owning JSFunction.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
} else if (is_bytecode_live) {
// If baseline code is flushed but we have a valid bytecode array reset
- // the function_data field to BytecodeArray.
- flushing_candidate.set_function_data(baseline_data.data(),
- kReleaseStore);
+ // the function_data field to the BytecodeArray/InterpreterData.
+ flushing_candidate.set_function_data(
+ baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
}
}
if (!is_bytecode_live) {
// If baseline code flushing is disabled we should only flush bytecode
// from functions that don't have baseline data.
- DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineData());
+ DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
@@ -2508,10 +2492,10 @@ bool MarkCompactCollector::TransitionArrayNeedsCompaction(
DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
#ifdef DEBUG
// Targets can only be dead iff this array is fully deserialized.
- for (int i = 0; i < num_transitions; ++i) {
+ for (int j = 0; j < num_transitions; ++j) {
DCHECK_IMPLIES(
- !transitions.GetRawTarget(i).IsSmi(),
- !non_atomic_marking_state()->IsWhite(transitions.GetTarget(i)));
+ !transitions.GetRawTarget(j).IsSmi(),
+ !non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
}
#endif
return false;
@@ -2519,8 +2503,8 @@ bool MarkCompactCollector::TransitionArrayNeedsCompaction(
TransitionsAccessor::GetTargetFromRaw(raw_target))) {
#ifdef DEBUG
// Targets can only be dead iff this array is fully deserialized.
- for (int i = 0; i < num_transitions; ++i) {
- DCHECK(!transitions.GetRawTarget(i).IsSmi());
+ for (int j = 0; j < num_transitions; ++j) {
+ DCHECK(!transitions.GetRawTarget(j).IsSmi());
}
#endif
return true;
@@ -2895,8 +2879,10 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromHeapObject(heap_obj)->IsFlagSet(
Page::COMPACTION_WAS_ABORTED));
- typename TSlot::TObject target =
- MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
+ PtrComprCageBase host_cage_base =
+ V8_EXTERNAL_CODE_SPACE_BOOL ? GetPtrComprCageBase(heap_obj) : cage_base;
+ typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
+ map_word.ToForwardingAddress(host_cage_base));
if (access_mode == AccessMode::NON_ATOMIC) {
slot.store(target);
} else {
@@ -2966,50 +2952,50 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
+class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : cage_base_(heap->isolate()) {}
+ explicit PointersUpdatingVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
- UpdateStrongSlotInternal(cage_base_, p);
+ UpdateStrongSlotInternal(cage_base(), p);
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
- UpdateSlotInternal(cage_base_, p);
+ UpdateSlotInternal(cage_base(), p);
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
for (ObjectSlot p = start; p < end; ++p) {
- UpdateStrongSlotInternal(cage_base_, p);
+ UpdateStrongSlotInternal(cage_base(), p);
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot p = start; p < end; ++p) {
- UpdateSlotInternal(cage_base_, p);
+ UpdateSlotInternal(cage_base(), p);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = cage_base_;
- UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base_,
- code_cage_base, slot);
+ UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base(),
+ code_cage_base(), slot);
}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
}
@@ -3017,7 +3003,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
for (OffHeapObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
}
@@ -3056,8 +3042,6 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
PtrComprCageBase cage_base, MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
-
- PtrComprCageBase cage_base_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3374,7 +3358,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
+ collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
+ chunk);
}
}
break;
@@ -3442,27 +3427,24 @@ class PageEvacuationJob : public v8::JobTask {
};
template <class Evacuator, class Collector>
-void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
+size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
Collector* collector,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
- MigrationObserver* migration_observer, const intptr_t live_bytes) {
- // Used for trace summary.
- double compaction_speed = 0;
- if (FLAG_trace_evacuation) {
- compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ MigrationObserver* migration_observer) {
+ base::Optional<ProfilingMigrationObserver> profiling_observer;
+ if (isolate()->LogObjectRelocation()) {
+ profiling_observer.emplace(heap());
}
-
- const bool profiling = isolate()->LogObjectRelocation();
- ProfilingMigrationObserver profiling_observer(heap());
-
- const size_t pages_count = evacuation_items.size();
std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
const int wanted_num_tasks = NumberOfParallelCompactionTasks();
for (int i = 0; i < wanted_num_tasks; i++) {
auto evacuator = std::make_unique<Evacuator>(collector);
- if (profiling) evacuator->AddObserver(&profiling_observer);
- if (migration_observer != nullptr)
+ if (profiling_observer) {
+ evacuator->AddObserver(&profiling_observer.value());
+ }
+ if (migration_observer) {
evacuator->AddObserver(migration_observer);
+ }
evacuators.push_back(std::move(evacuator));
}
V8::GetCurrentPlatform()
@@ -3470,21 +3452,10 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
std::make_unique<PageEvacuationJob>(
isolate(), &evacuators, std::move(evacuation_items)))
->Join();
-
- for (auto& evacuator : evacuators) evacuator->Finalize();
- evacuators.clear();
-
- if (FLAG_trace_evacuation) {
- PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
- "wanted_tasks=%d cores=%d live_bytes=%" V8PRIdPTR
- " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", pages_count,
- wanted_num_tasks,
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
- live_bytes, compaction_speed);
+ for (auto& evacuator : evacuators) {
+ evacuator->Finalize();
}
+ return wanted_num_tasks;
}
bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
@@ -3497,6 +3468,26 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
heap()->CanExpandOldGeneration(live_bytes);
}
+namespace {
+
+void TraceEvacuation(Isolate* isolate, size_t pages_count,
+ size_t wanted_num_tasks, size_t live_bytes,
+ size_t aborted_pages) {
+ DCHECK(FLAG_trace_evacuation);
+ PrintIsolate(
+ isolate,
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
+ "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f aborted=%zu\n",
+ isolate->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", pages_count, wanted_num_tasks,
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1, live_bytes,
+ isolate->heap()->tracer()->CompactionSpeedInBytesPerMillisecond(),
+ aborted_pages);
+}
+
+} // namespace
+
void MarkCompactCollector::EvacuatePagesInParallel() {
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
@@ -3554,8 +3545,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
"MarkCompactCollector::EvacuatePagesInParallel", "pages",
evacuation_items.size());
- CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, std::move(evacuation_items), nullptr, live_bytes);
+ const size_t pages_count = evacuation_items.size();
+ const size_t wanted_num_tasks =
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr);
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3564,7 +3557,12 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// in the sweeping or old-to-new remembered set.
sweeper()->MergeOldToNewRememberedSetsForSweptPages();
- PostProcessEvacuationCandidates();
+ const size_t aborted_pages = PostProcessEvacuationCandidates();
+
+ if (FLAG_trace_evacuation) {
+ TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes,
+ aborted_pages);
+ }
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -3945,7 +3943,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
DCHECK_IMPLIES(
- collector == MARK_COMPACTOR,
+ collector == GarbageCollector::MARK_COMPACTOR,
chunk_->SweepingDone() &&
chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
@@ -3958,9 +3956,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(
- collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_always_promote_young_mc,
+ slots == 0);
if (slots == 0) {
chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -3969,7 +3967,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
DCHECK_IMPLIES(
- collector == MARK_COMPACTOR,
+ collector == GarbageCollector::MARK_COMPACTOR,
!chunk_->SweepingDone() &&
(chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
nullptr);
@@ -3984,9 +3982,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(
- collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_always_promote_young_mc,
+ slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
@@ -4023,7 +4021,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
(chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
nullptr)) {
PtrComprCageBase cage_base = heap_->isolate();
- PtrComprCageBase code_cage_base = heap_->isolate();
+#if V8_EXTERNAL_CODE_SPACE
+ PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
+#else
+ PtrComprCageBase code_cage_base = cage_base;
+#endif
RememberedSet<OLD_TO_CODE>::Iterate(
chunk_,
[=](MaybeObjectSlot slot) {
@@ -4088,8 +4090,8 @@ std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
std::unique_ptr<UpdatingItem>
MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return std::make_unique<
- RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>>(
+ return std::make_unique<RememberedSetUpdatingItem<
+ NonAtomicMarkingState, GarbageCollector::MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
@@ -4157,25 +4159,26 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"EphemeronTableUpdatingItem::Process");
+ PtrComprCageBase cage_base(heap_->isolate());
for (auto it = heap_->ephemeron_remembered_set_.begin();
it != heap_->ephemeron_remembered_set_.end();) {
EphemeronHashTable table = it->first;
auto& indices = it->second;
- if (table.map_word(kRelaxedLoad).IsForwardingAddress()) {
+ if (table.map_word(cage_base, kRelaxedLoad).IsForwardingAddress()) {
// The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
// inserts entries for the moved table into ephemeron_remembered_set_.
it = heap_->ephemeron_remembered_set_.erase(it);
continue;
}
- DCHECK(table.map().IsMap());
- DCHECK(table.Object::IsEphemeronHashTable());
+ DCHECK(table.map(cage_base).IsMap(cage_base));
+ DCHECK(table.IsEphemeronHashTable(cage_base));
for (auto iti = indices.begin(); iti != indices.end();) {
// EphemeronHashTable keys must be heap objects.
HeapObjectSlot key_slot(table.RawFieldOfElementAt(
EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
HeapObject key = key_slot.ToHeapObject();
- MapWord map_word = key.map_word(kRelaxedLoad);
+ MapWord map_word = key.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
key = map_word.ToForwardingAddress();
key_slot.StoreHeapObject(key);
@@ -4254,39 +4257,37 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- HeapObject failed_object, MemoryChunk* chunk) {
+ Address failed_start, MemoryChunk* chunk) {
base::MutexGuard guard(&mutex_);
aborted_evacuation_candidates_.push_back(
- std::make_pair(failed_object, static_cast<Page*>(chunk)));
+ std::make_pair(failed_start, static_cast<Page*>(chunk)));
}
-void MarkCompactCollector::PostProcessEvacuationCandidates() {
+size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
aborted_evacuation_candidates_.empty());
- for (auto object_and_page : aborted_evacuation_candidates_) {
- HeapObject failed_object = object_and_page.first;
- Page* page = object_and_page.second;
+ for (auto start_and_page : aborted_evacuation_candidates_) {
+ Address failed_start = start_and_page.first;
+ Page* page = start_and_page.second;
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
    // might not have recorded them in the first place.
// Remove outdated slots.
- RememberedSetSweeping::RemoveRange(page, page->address(),
- failed_object.address(),
+ RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
- failed_object.address(),
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
- failed_object.address());
+ failed_start);
// Remove invalidated slots.
- if (failed_object.address() > page->area_start()) {
+ if (failed_start > page->area_start()) {
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::OldToNew(page);
- old_to_new_cleanup.Free(page->area_start(), failed_object.address());
+ old_to_new_cleanup.Free(page->area_start(), failed_start);
}
// Recompute live bytes.
@@ -4314,10 +4315,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
}
}
DCHECK_EQ(aborted_pages_verified, aborted_pages);
- if (FLAG_trace_evacuation && (aborted_pages > 0)) {
- PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
- isolate()->time_millis_since_init(), aborted_pages);
- }
+ return aborted_pages;
}
void MarkCompactCollector::ReleaseEvacuationCandidates() {
@@ -4502,10 +4500,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base);
+ typename TSlot::TObject object = current.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -4521,10 +4517,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Code code = Code::unchecked_cast(slot.load(code_cage_base));
+ Code code = Code::unchecked_cast(slot.load(code_cage_base()));
VerifyHeapObjectImpl(code);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
@@ -4532,7 +4525,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -4554,9 +4547,11 @@ class YoungGenerationMarkingVisitor final
: public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
YoungGenerationMarkingVisitor(
- MinorMarkCompactCollector::MarkingState* marking_state,
+ Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
+ : NewSpaceVisitor(isolate),
+ worklist_(global_worklist, task_id),
+ marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
@@ -4640,7 +4635,7 @@ MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
- marking_state(), worklist_, kMainMarker)),
+ heap->isolate(), marking_state(), worklist_, kMainMarker)),
page_parallel_job_semaphore_(0) {
static_assert(
kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
@@ -4663,7 +4658,8 @@ void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
}
void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
- heap_->array_buffer_sweeper()->RequestSweepYoung();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kYoung);
}
class YoungGenerationMigrationObserver final : public MigrationObserver {
@@ -4818,6 +4814,9 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
};
void MinorMarkCompactCollector::CollectGarbage() {
+ // Minor MC does not support processing the ephemeron remembered set.
+ DCHECK(heap()->ephemeron_remembered_set_.empty());
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -5036,8 +5035,8 @@ MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
std::unique_ptr<UpdatingItem>
MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return std::make_unique<
- RememberedSetUpdatingItem<NonAtomicMarkingState, MINOR_MARK_COMPACTOR>>(
+ return std::make_unique<RememberedSetUpdatingItem<
+ NonAtomicMarkingState, GarbageCollector::MINOR_MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
@@ -5052,7 +5051,7 @@ class YoungGenerationMarkingTask {
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: marking_worklist_(global_worklist, task_id),
marking_state_(collector->marking_state()),
- visitor_(marking_state_, global_worklist, task_id) {
+ visitor_(isolate, marking_state_, global_worklist, task_id) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
@@ -5543,8 +5542,14 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
- CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, std::move(evacuation_items), &observer, live_bytes);
+ const auto pages_count = evacuation_items.size();
+ const auto wanted_num_tasks =
+ CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+ this, std::move(evacuation_items), &observer);
+
+ if (FLAG_trace_evacuation) {
+ TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
+ }
}
#endif // ENABLE_MINOR_MC
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 9ce993898c5..5a7a450e38e 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -219,11 +219,12 @@ class MarkCompactCollectorBase {
virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
+ // Returns the number of wanted compaction tasks.
template <class Evacuator, class Collector>
- void CreateAndExecuteEvacuationTasks(
+ size_t CreateAndExecuteEvacuationTasks(
Collector* collector,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
- MigrationObserver* migration_observer, const intptr_t live_bytes);
+ MigrationObserver* migration_observer);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
@@ -377,11 +378,12 @@ class MainMarkingVisitor final
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool embedder_tracing_enabled, bool is_forced_gc)
+ bool embedder_tracing_enabled,
+ bool should_keep_ages_unchanged)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
- is_forced_gc),
+ should_keep_ages_unchanged),
marking_state_(marking_state),
revisiting_object_(false) {}
@@ -391,9 +393,6 @@ class MainMarkingVisitor final
V8_UNLIKELY(revisiting_object_);
}
- void MarkDescriptorArrayFromWriteBarrier(DescriptorArray descriptors,
- int number_of_own_descriptors);
-
private:
// Functions required by MarkingVisitorBase.
@@ -582,10 +581,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VisitObject(HeapObject obj);
// Used by incremental marking for black-allocated objects.
void RevisitObject(HeapObject obj);
- // Ensures that all descriptors int range [0, number_of_own_descripts)
- // are visited.
- void MarkDescriptorArrayFromWriteBarrier(DescriptorArray array,
- int number_of_own_descriptors);
// Drains the main thread marking worklist until the specified number of
// bytes are processed. If the number of bytes is zero, then the worklist
@@ -670,10 +665,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
- // Marks the BaselineData as live and records the slots of baseline data
- // fields. This assumes that the objects in the data fields are alive.
- void MarkBaselineDataAsLive(BaselineData baseline_data);
-
// Clears bytecode arrays / baseline code that have not been executed for
// multiple collections.
void ProcessOldCodeCandidates();
@@ -727,8 +718,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
- void PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(HeapObject failed_object,
+ // Returns number of aborted pages.
+ size_t PostProcessEvacuationCandidates();
+ void ReportAbortedEvacuationCandidate(Address failed_start,
MemoryChunk* chunk);
static const int kEphemeronChunkSize = 8 * KB;
@@ -782,7 +774,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;
+ std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
Sweeper* sweeper_;
diff --git a/chromium/v8/src/heap/marking-barrier-inl.h b/chromium/v8/src/heap/marking-barrier-inl.h
index d03bdcb0f7a..03e89a68e43 100644
--- a/chromium/v8/src/heap/marking-barrier-inl.h
+++ b/chromium/v8/src/heap/marking-barrier-inl.h
@@ -40,6 +40,21 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
return true;
}
+template <typename TSlot>
+inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
+ auto* isolate = heap_->isolate();
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = slot.Relaxed_Load();
+ HeapObject heap_object;
+    // Mark both weak and strong edges.
+ if (object.GetHeapObject(isolate, &heap_object)) {
+ if (MarkValue(host, heap_object) && is_compacting_) {
+ collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
+ }
+ }
+ }
+}
+
bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state_.WhiteToGrey(obj)) {
worklist_.Push(obj);
diff --git a/chromium/v8/src/heap/marking-barrier.cc b/chromium/v8/src/heap/marking-barrier.cc
index 06f2e67810a..51b46057565 100644
--- a/chromium/v8/src/heap/marking-barrier.cc
+++ b/chromium/v8/src/heap/marking-barrier.cc
@@ -4,6 +4,7 @@
#include "src/heap/marking-barrier.h"
+#include "src/base/logging.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
@@ -15,6 +16,7 @@
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/safepoint.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"
namespace v8 {
@@ -74,12 +76,30 @@ void MarkingBarrier::Write(JSArrayBuffer host,
void MarkingBarrier::Write(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
DCHECK(IsCurrentMarkingBarrier());
- DCHECK(is_main_thread_barrier_);
- int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
- if (NumberOfMarkedDescriptors::decode(collector_->epoch(), raw_marked) <
- number_of_own_descriptors) {
- collector_->MarkDescriptorArrayFromWriteBarrier(descriptor_array,
- number_of_own_descriptors);
+ DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+ // The DescriptorArray needs to be marked black here to ensure that slots are
+ // recorded by the Scavenger in case the DescriptorArray is promoted while
+ // incremental marking is running. This is needed as the regular marking
+ // visitor does not re-process any already marked descriptors. If we don't
+ // mark it black here, the Scavenger may promote a DescriptorArray and any
+ // already marked descriptors will not have any slots recorded.
+ if (!marking_state_.IsBlack(descriptor_array)) {
+ marking_state_.WhiteToGrey(descriptor_array);
+ marking_state_.GreyToBlack(descriptor_array);
+ MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
+ descriptor_array.GetDescriptorSlot(0));
+ }
+ const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+ collector_->epoch(), number_of_own_descriptors);
+ if (old_marked < number_of_own_descriptors) {
+ // This marks the range from [old_marked, number_of_own_descriptors) instead
+    // of registering weak slots, which may temporarily keep more objects alive
+ // for the current GC cycle. Weakness is not needed for actual trimming, see
+ // `MarkCompactCollector::TrimDescriptorArray()`.
+ MarkRange(descriptor_array,
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(
+ number_of_own_descriptors)));
}
}
diff --git a/chromium/v8/src/heap/marking-barrier.h b/chromium/v8/src/heap/marking-barrier.h
index 9ed1ee63824..a8e084b699e 100644
--- a/chromium/v8/src/heap/marking-barrier.h
+++ b/chromium/v8/src/heap/marking-barrier.h
@@ -55,6 +55,9 @@ class MarkingBarrier {
bool IsCurrentMarkingBarrier();
+ template <typename TSlot>
+ inline void MarkRange(HeapObject value, TSlot start, TSlot end);
+
Heap* heap_;
MarkCompactCollector* collector_;
IncrementalMarking* incremental_marking_;
diff --git a/chromium/v8/src/heap/marking-visitor-inl.h b/chromium/v8/src/heap/marking-visitor-inl.h
index fe8661c516f..28fe88d9d19 100644
--- a/chromium/v8/src/heap/marking-visitor-inl.h
+++ b/chromium/v8/src/heap/marking-visitor-inl.h
@@ -8,6 +8,7 @@
#include "src/heap/marking-visitor.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/spaces.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
@@ -20,6 +21,15 @@ namespace internal {
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
+void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMapPointer(
+ HeapObject host) {
+  // Note that we are skipping recording the slot because map objects
+  // can't move, so this is safe (see ProcessStrongHeapObject for comparison).
+ MarkObject(host, HeapObject::cast(
+ host.map(ObjectVisitorWithCageBases::cage_base())));
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
@@ -75,7 +85,8 @@ MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitPointersImpl(
HeapObject host, TSlot start, TSlot end) {
using THeapObjectSlot = typename TSlot::THeapObjectSlot;
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.Relaxed_Load();
+ typename TSlot::TObject object =
+ slot.Relaxed_Load(ObjectVisitorWithCageBases::cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
@@ -93,9 +104,8 @@ V8_INLINE void
MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodePointerImpl(
HeapObject host, CodeObjectSlot slot) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object object = slot.Relaxed_Load(code_cage_base);
+ Object object =
+ slot.Relaxed_Load(ObjectVisitorWithCageBases::code_cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
@@ -109,7 +119,8 @@ template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject object = rinfo->target_object();
+ HeapObject object =
+ rinfo->target_object_no_host(ObjectVisitorWithCageBases::cage_base());
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
@@ -141,7 +152,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
this->VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
- if (!is_forced_gc_) {
+ if (!should_keep_ages_unchanged_) {
object.MakeOlder();
}
return size;
@@ -185,11 +196,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
// If bytecode flushing is disabled but baseline code flushing is enabled
// then we have to visit the bytecode but not the baseline code.
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- BaselineData baseline_data =
- BaselineData::cast(shared_info.function_data(kAcquireLoad));
- // Visit the bytecode hanging off baseline data.
- VisitPointer(baseline_data,
- baseline_data.RawField(BaselineData::kDataOffset));
+ CodeT baseline_codet = CodeT::cast(shared_info.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ // Visit the bytecode hanging off baseline code.
+ VisitPointer(baseline_code,
+ baseline_code.RawField(
+ Code::kDeoptimizationDataOrInterpreterDataOffset));
weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
@@ -206,13 +219,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk) {
+ ProgressBar& progress_bar) {
const int kProgressBarScanningChunk = kMaxRegularHeapObjectSize;
STATIC_ASSERT(kMaxRegularHeapObjectSize % kTaggedSize == 0);
DCHECK(concrete_visitor()->marking_state()->IsBlackOrGrey(object));
concrete_visitor()->marking_state()->GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
- size_t current_progress_bar = chunk->ProgressBar();
+ size_t current_progress_bar = progress_bar.Value();
int start = static_cast<int>(current_progress_bar);
if (start == 0) {
this->VisitMapPointer(object);
@@ -221,7 +234,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
int end = std::min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
- bool success = chunk->TrySetProgressBar(current_progress_bar, end);
+ bool success = progress_bar.TrySetNewValue(current_progress_bar, end);
CHECK(success);
if (end < size) {
// The object can be pushed back onto the marking worklist only after
@@ -237,9 +250,10 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
Map map, FixedArray object) {
// Arrays with the progress bar are not left-trimmable because they reside
// in the large object space.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
- ? VisitFixedArrayWithProgressBar(map, object, chunk)
+ ProgressBar& progress_bar =
+ MemoryChunk::FromHeapObject(object)->ProgressBar();
+ return progress_bar.IsEnabled()
+ ? VisitFixedArrayWithProgressBar(map, object, progress_bar)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
diff --git a/chromium/v8/src/heap/marking-visitor.h b/chromium/v8/src/heap/marking-visitor.h
index 555b2e81185..fdacf8cbaf9 100644
--- a/chromium/v8/src/heap/marking-visitor.h
+++ b/chromium/v8/src/heap/marking-visitor.h
@@ -106,15 +106,17 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool is_embedder_tracing_enabled, bool is_forced_gc)
- : local_marking_worklists_(local_marking_worklists),
+ bool is_embedder_tracing_enabled,
+ bool should_keep_ages_unchanged)
+ : HeapVisitor<int, ConcreteVisitor>(heap),
+ local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
- is_forced_gc_(is_forced_gc),
+ should_keep_ages_unchanged_(should_keep_ages_unchanged),
is_shared_heap_(heap->IsShared()) {}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
@@ -134,11 +136,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
- void VisitMapPointer(HeapObject host) final {
- // Note that we are skipping the recording the slot because map objects
- // can't move, so this is safe (see ProcessStrongHeapObject for comparison)
- MarkObject(host, HeapObject::cast(host.map()));
- }
+ V8_INLINE void VisitMapPointer(HeapObject host) final;
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
@@ -193,7 +191,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk);
+ ProgressBar& progress_bar);
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header. Returns the size of the descriptor array
  // if it was successfully marked as black.
@@ -208,7 +206,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const unsigned mark_compact_epoch_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
- const bool is_forced_gc_;
+ const bool should_keep_ages_unchanged_;
const bool is_shared_heap_;
};
diff --git a/chromium/v8/src/heap/memory-chunk-layout.h b/chromium/v8/src/heap/memory-chunk-layout.h
index f37583ab426..1b958f0cbff 100644
--- a/chromium/v8/src/heap/memory-chunk-layout.h
+++ b/chromium/v8/src/heap/memory-chunk-layout.h
@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/list.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/slot-set.h"
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
@@ -50,7 +51,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(VirtualMemory, Reservation),
// MemoryChunk fields:
FIELD(SlotSet* [kNumSets], SlotSet),
- FIELD(std::atomic<size_t>, ProgressBar),
+ FIELD(ProgressBar, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
diff --git a/chromium/v8/src/heap/memory-chunk.cc b/chromium/v8/src/heap/memory-chunk.cc
index 0d9afdb1c7c..959501724fa 100644
--- a/chromium/v8/src/heap/memory-chunk.cc
+++ b/chromium/v8/src/heap/memory-chunk.cc
@@ -76,7 +76,7 @@ void MemoryChunk::SetReadAndExecutable() {
PageAllocator::kReadExecute);
}
-void MemoryChunk::SetReadAndWritable() {
+void MemoryChunk::SetCodeModificationPermissions() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
@@ -100,6 +100,14 @@ void MemoryChunk::SetReadAndWritable() {
}
}
+void MemoryChunk::SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+}
+
namespace {
PageAllocator::Permission DefaultWritableCodePermissions() {
@@ -130,7 +138,7 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
// Not actually used but initialize anyway for predictability.
chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
}
- chunk->progress_bar_ = 0;
+ chunk->progress_bar_.Initialize();
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
diff --git a/chromium/v8/src/heap/memory-chunk.h b/chromium/v8/src/heap/memory-chunk.h
index 66196c1f136..761ea9a83aa 100644
--- a/chromium/v8/src/heap/memory-chunk.h
+++ b/chromium/v8/src/heap/memory-chunk.h
@@ -162,22 +162,10 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
- size_t ProgressBar() {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.load(std::memory_order_acquire);
- }
-
- bool TrySetProgressBar(size_t old_value, size_t new_value) {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.compare_exchange_strong(old_value, new_value,
- std::memory_order_acq_rel);
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- progress_bar_.store(0, std::memory_order_release);
- }
+ class ProgressBar& ProgressBar() {
+ return progress_bar_;
}
+ const class ProgressBar& ProgressBar() const { return progress_bar_; }
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
@@ -203,15 +191,9 @@ class MemoryChunk : public BasicMemoryChunk {
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
- V8_EXPORT_PRIVATE void SetReadAndWritable();
-
- void SetDefaultCodePermissions() {
- if (FLAG_jitless) {
- SetReadable();
- } else {
- SetReadAndExecutable();
- }
- }
+
+ V8_EXPORT_PRIVATE void SetCodeModificationPermissions();
+ V8_EXPORT_PRIVATE void SetDefaultCodePermissions();
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }
@@ -256,9 +238,9 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- std::atomic<size_t> progress_bar_;
+ // Used by the marker to keep track of the scanning progress in large objects
+ // that have a progress bar and are scanned in increments.
+ class ProgressBar progress_bar_;
// Count of bytes marked black on page.
std::atomic<intptr_t> live_byte_count_;
diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc
index a29ffb10e19..0ef5d7550b9 100644
--- a/chromium/v8/src/heap/memory-measurement.cc
+++ b/chromium/v8/src/heap/memory-measurement.cc
@@ -4,9 +4,10 @@
#include "src/heap/memory-measurement.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-worklist.h"
diff --git a/chromium/v8/src/heap/memory-measurement.h b/chromium/v8/src/heap/memory-measurement.h
index cf72c57abdb..2b5377943c4 100644
--- a/chromium/v8/src/heap/memory-measurement.h
+++ b/chromium/v8/src/heap/memory-measurement.h
@@ -8,6 +8,7 @@
#include <list>
#include <unordered_map>
+#include "include/v8-statistics.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/utils/random-number-generator.h"
#include "src/common/globals.h"
diff --git a/chromium/v8/src/heap/new-spaces.cc b/chromium/v8/src/heap/new-spaces.cc
index d08fe48f23f..b935a585bc8 100644
--- a/chromium/v8/src/heap/new-spaces.cc
+++ b/chromium/v8/src/heap/new-spaces.cc
@@ -57,7 +57,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
+ current_page->ClearFlags(Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
@@ -76,8 +76,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
@@ -214,7 +213,8 @@ void SemiSpace::ShrinkTo(size_t new_capacity) {
target_capacity_ = new_capacity;
}
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags,
+ Page::MainThreadFlags mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
@@ -253,8 +253,7 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
@@ -276,7 +275,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
DCHECK(from->first_page());
DCHECK(to->first_page());
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+ auto saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->target_capacity_, to->target_capacity_);
@@ -289,7 +288,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
+ from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
}
void SemiSpace::set_age_mark(Address mark) {
diff --git a/chromium/v8/src/heap/new-spaces.h b/chromium/v8/src/heap/new-spaces.h
index 7f6f46c78b1..45129acea1e 100644
--- a/chromium/v8/src/heap/new-spaces.h
+++ b/chromium/v8/src/heap/new-spaces.h
@@ -173,7 +173,7 @@ class SemiSpace : public Space {
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
- void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+ void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
// The currently committed space capacity.
size_t current_capacity_;
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 6dcd0a51a08..4c00e8154a1 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -145,7 +145,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
DescriptorArray descriptors = map.instance_descriptors();
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
// Stop on first out-of-object field.
if (!index.is_inobject()) break;
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index bef24bb1d52..715b83b9aca 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -19,6 +19,7 @@
#include "src/objects/ordered-hash-table.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/torque-defined-classes.h"
+#include "src/objects/visitors.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
@@ -28,6 +29,19 @@ namespace v8 {
namespace internal {
template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(
+ PtrComprCageBase cage_base, PtrComprCageBase code_cage_base)
+ : ObjectVisitorWithCageBases(cage_base, code_cage_base) {}
+
+template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(Isolate* isolate)
+ : ObjectVisitorWithCageBases(isolate) {}
+
+template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap) {}
+
+template <typename ResultType, typename ConcreteVisitor>
template <typename T>
T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
return T::cast(object);
@@ -35,7 +49,7 @@ T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject object) {
- return Visit(object.map(), object);
+ return Visit(object.map(cage_base()), object);
}
template <typename ResultType, typename ConcreteVisitor>
@@ -173,6 +187,10 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
}
template <typename ConcreteVisitor>
+NewSpaceVisitor<ConcreteVisitor>::NewSpaceVisitor(Isolate* isolate)
+ : HeapVisitor<int, ConcreteVisitor>(isolate) {}
+
+template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
NativeContext object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
diff --git a/chromium/v8/src/heap/objects-visiting.cc b/chromium/v8/src/heap/objects-visiting.cc
index a33844743f9..e3514c51fa6 100644
--- a/chromium/v8/src/heap/objects-visiting.cc
+++ b/chromium/v8/src/heap/objects-visiting.cc
@@ -26,7 +26,7 @@ struct WeakListVisitor;
template <class T>
Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
- Object undefined = ReadOnlyRoots(heap).undefined_value();
+ HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
Object head = undefined;
T tail;
bool record_slots = MustRecordSlots(heap);
@@ -47,7 +47,7 @@ Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
} else {
// Subsequent elements in the list.
DCHECK(!tail.is_null());
- WeakListVisitor<T>::SetWeakNext(tail, retained);
+ WeakListVisitor<T>::SetWeakNext(tail, HeapObject::cast(retained));
if (record_slots) {
HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
@@ -187,7 +187,7 @@ struct WeakListVisitor<AllocationSite> {
template <>
struct WeakListVisitor<JSFinalizationRegistry> {
- static void SetWeakNext(JSFinalizationRegistry obj, Object next) {
+ static void SetWeakNext(JSFinalizationRegistry obj, HeapObject next) {
obj.set_next_dirty(next, UPDATE_WEAK_WRITE_BARRIER);
}
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 43578ba806d..7babd44fb45 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -9,7 +9,6 @@
#include "src/objects/map.h"
#include "src/objects/objects.h"
#include "src/objects/visitors.h"
-#include "torque-generated/field-offsets.h"
namespace v8 {
namespace internal {
@@ -78,8 +77,13 @@ TORQUE_VISITOR_ID_LIST(FORWARD_DECLARE)
// ...
// }
template <typename ResultType, typename ConcreteVisitor>
-class HeapVisitor : public ObjectVisitor {
+class HeapVisitor : public ObjectVisitorWithCageBases {
public:
+ inline HeapVisitor(PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base);
+ inline explicit HeapVisitor(Isolate* isolate);
+ inline explicit HeapVisitor(Heap* heap);
+
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
// A callback for visiting the map pointer in the object header.
@@ -115,6 +119,8 @@ class HeapVisitor : public ObjectVisitor {
template <typename ConcreteVisitor>
class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
public:
+ V8_INLINE NewSpaceVisitor(Isolate* isolate);
+
V8_INLINE bool ShouldVisitMapPointer() { return false; }
// Special cases for young generation.
diff --git a/chromium/v8/src/heap/paged-spaces.cc b/chromium/v8/src/heap/paged-spaces.cc
index 021cf940d77..baac9d54122 100644
--- a/chromium/v8/src/heap/paged-spaces.cc
+++ b/chromium/v8/src/heap/paged-spaces.cc
@@ -365,6 +365,7 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
optional_scope.emplace(chunk);
}
+ ConcurrentAllocationMutex guard(this);
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
@@ -498,11 +499,11 @@ void PagedSpace::SetReadAndExecutable() {
}
}
-void PagedSpace::SetReadAndWritable() {
+void PagedSpace::SetCodeModificationPermissions() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
+ page->SetCodeModificationPermissions();
}
}
@@ -570,8 +571,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
DCHECK(identity() == OLD_SPACE || identity() == MAP_SPACE);
DCHECK_EQ(origin, AllocationOrigin::kRuntime);
- auto result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ base::Optional<std::pair<Address, size_t>> result =
+ TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
+ max_size_in_bytes, alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -582,7 +584,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
RefillFreeList();
// Retry the free list allocation.
- auto result = TryAllocationFromFreeListBackground(
+ result = TryAllocationFromFreeListBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
@@ -600,7 +602,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
- auto result = TryAllocationFromFreeListBackground(
+ result = TryAllocationFromFreeListBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
}
@@ -608,7 +610,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- auto result = ExpandBackground(local_heap, max_size_in_bytes);
+ result = ExpandBackground(local_heap, max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
@@ -686,6 +688,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
@@ -727,10 +730,11 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
- if (object.IsExternalString()) {
+ if (object.IsExternalString(cage_base)) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ size_t payload_size = external_string.ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] +=
+ payload_size;
}
}
for (int i = 0; i < kNumTypes; i++) {
diff --git a/chromium/v8/src/heap/paged-spaces.h b/chromium/v8/src/heap/paged-spaces.h
index d502b226c4a..b5f9f0e391c 100644
--- a/chromium/v8/src/heap/paged-spaces.h
+++ b/chromium/v8/src/heap/paged-spaces.h
@@ -217,7 +217,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetReadable();
void SetReadAndExecutable();
- void SetReadAndWritable();
+ void SetCodeModificationPermissions();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
diff --git a/chromium/v8/src/heap/progress-bar.h b/chromium/v8/src/heap/progress-bar.h
new file mode 100644
index 00000000000..b00558b684c
--- /dev/null
+++ b/chromium/v8/src/heap/progress-bar.h
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PROGRESS_BAR_H_
+#define V8_HEAP_PROGRESS_BAR_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// The progress bar keeps track of the number of bytes processed within a
+// single object. It must be enabled before it's used.
+//
+// Only large objects use the progress bar, which is stored in their page
+// header. These objects are scanned in increments and are kept black while
+// being scanned. Even if the mutator writes to them, they remain black and a
+// white-to-grey transition is performed on the value being written.
+//
+// The progress bar starts as disabled. After enabling (through `Enable()`), it
+// can never be disabled again.
+class ProgressBar final {
+ public:
+ void Initialize() { value_ = kDisabledSentinel; }
+ void Enable() { value_ = 0; }
+ bool IsEnabled() const {
+ return value_.load(std::memory_order_acquire) != kDisabledSentinel;
+ }
+
+ size_t Value() const {
+ DCHECK(IsEnabled());
+ return value_.load(std::memory_order_acquire);
+ }
+
+ bool TrySetNewValue(size_t old_value, size_t new_value) {
+ DCHECK(IsEnabled());
+ DCHECK_NE(kDisabledSentinel, new_value);
+ return value_.compare_exchange_strong(old_value, new_value,
+ std::memory_order_acq_rel);
+ }
+
+ void ResetIfEnabled() {
+ if (IsEnabled()) {
+ value_.store(0, std::memory_order_release);
+ }
+ }
+
+ private:
+ static constexpr size_t kDisabledSentinel = SIZE_MAX;
+
+ std::atomic<size_t> value_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PROGRESS_BAR_H_
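For orientation, a minimal usage sketch of the ProgressBar interface declared above, assuming the header exactly as shown; it mirrors how the marking-visitor-inl.h hunk drives the bar through Value() and TrySetNewValue(). The ScanIncrement helper and the kChunk constant are illustrative assumptions, not part of the patch.

#include <algorithm>
#include <cstddef>

#include "src/heap/progress-bar.h"

// Illustrative only: scan one increment of a large object and publish the new
// position. Returns the number of bytes visited in this step.
size_t ScanIncrement(v8::internal::ProgressBar& progress_bar,
                     size_t object_size) {
  constexpr size_t kChunk = 64 * 1024;        // assumed increment size
  if (!progress_bar.IsEnabled()) return 0;    // the bar starts out disabled
  const size_t start = progress_bar.Value();  // bytes already processed
  const size_t end = std::min(object_size, start + kChunk);
  // ... visit the slots in [start, end) here ...
  // Publish the new position; the CAS fails if another task advanced the bar.
  return progress_bar.TrySetNewValue(start, end) ? end - start : 0;
}

Per the class comment, Enable() must have been called beforehand and can never be undone; ResetIfEnabled() restarts an enabled bar at 0 without disabling it.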
diff --git a/chromium/v8/src/heap/safepoint.cc b/chromium/v8/src/heap/safepoint.cc
index e67c9743f89..2d79292bbc0 100644
--- a/chromium/v8/src/heap/safepoint.cc
+++ b/chromium/v8/src/heap/safepoint.cc
@@ -23,6 +23,10 @@ GlobalSafepoint::GlobalSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
+ // Safepoints need to be initiated on the main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_NULL(LocalHeap::Current());
+
if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer(
@@ -32,7 +36,6 @@ void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
local_heaps_mutex_.Lock();
barrier_.Arm();
- DCHECK_NULL(LocalHeap::Current());
int running = 0;
@@ -66,11 +69,13 @@ void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
}
void GlobalSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
+ // Safepoints need to be initiated on the main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_NULL(LocalHeap::Current());
+
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
- DCHECK_NULL(LocalHeap::Current());
-
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 5eea1afafe6..7c05527e8c7 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -341,10 +341,10 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
kReleaseStore);
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map map = first_word.ToMap();
- SlotCallbackResult result =
- EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
- Map::ObjectFieldsFrom(map.visitor_id()));
+ Map first_map = first_word.ToMap();
+ SlotCallbackResult result = EvacuateObjectDefault(
+ first_map, slot, first, first.SizeFromMap(first_map),
+ Map::ObjectFieldsFrom(first_map.visitor_id()));
object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
kReleaseStore);
return result;
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index f697e831053..3372033efd3 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -490,7 +490,8 @@ void ScavengerCollector::IterateStackAndScavenge(
}
void ScavengerCollector::SweepArrayBufferExtensions() {
- heap_->array_buffer_sweeper()->RequestSweepYoung();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kYoung);
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
@@ -779,7 +780,8 @@ RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
: scavenger_(scavenger) {}
ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
- : scavenger_(scavenger) {}
+ : NewSpaceVisitor<ScavengeVisitor>(scavenger->heap()->isolate()),
+ scavenger_(scavenger) {}
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index 08488eacd0f..b3034dff8b6 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -59,7 +59,7 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kind);
- shared->set_internal_formal_parameter_count(len);
+ shared->set_internal_formal_parameter_count(JSParameterCount(len));
shared->set_length(len);
return shared;
}
@@ -640,7 +640,7 @@ void Heap::CreateApiObjects() {
}
void Heap::CreateInitialObjects() {
- HandleScope scope(isolate());
+ HandleScope initial_objects_handle_scope(isolate());
Factory* factory = isolate()->factory();
ReadOnlyRoots roots(this);
@@ -736,7 +736,7 @@ void Heap::CreateInitialObjects() {
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
{
- HandleScope scope(isolate());
+ HandleScope handle_scope(isolate());
#define SYMBOL_INIT(_, name) \
{ \
Handle<Symbol> symbol( \
@@ -748,7 +748,7 @@ void Heap::CreateInitialObjects() {
}
{
- HandleScope scope(isolate());
+ HandleScope handle_scope(isolate());
#define SYMBOL_INIT(_, name, description) \
Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
Handle<String> name##d = factory->InternalizeUtf8String(#description); \
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 4d3fd9411f2..a1992c3e5ea 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -45,6 +45,9 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
+// static
+constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
+
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -82,7 +85,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
- old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_page->ClearFlags(Page::kAllFlagsMask);
Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 6a047fd3750..eb71467f78d 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -211,13 +211,11 @@ STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
public:
- static const intptr_t kCopyAllFlags = ~0;
-
// Page flags copied from from-space to to-space when flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
+ static constexpr MainThreadFlags kCopyOnFlipFlagsMask =
+ MainThreadFlags(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index 7d2d680456a..7e18fc28954 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -366,6 +366,7 @@ int Sweeper::RawSweep(
// Iterate over the page using the live objects and free the memory before
// the given live object.
Address free_start = p->area_start();
+ PtrComprCageBase cage_base(heap_->isolate());
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject const object = object_and_size.first;
@@ -383,8 +384,8 @@ int Sweeper::RawSweep(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
}
- Map map = object.map(kAcquireLoad);
- DCHECK(map.IsMap());
+ Map map = object.map(cage_base, kAcquireLoad);
+ DCHECK(map.IsMap(cage_base));
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
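
The cage_base hunk above computes the pointer-compression base once, before the sweep loop, and threads it through the map() and IsMap() calls instead of letting each call rederive it. A small sketch of that hoisting pattern, with simplified types rather than V8's PtrComprCageBase:

// Sketch: compute a loop-invariant base once and pass it explicitly to the
// per-object helper, rather than recomputing it on every iteration.
#include <cstdint>
#include <iostream>
#include <vector>

struct CageBase {
  uintptr_t base;
};

// The helper takes the precomputed base instead of rediscovering it.
uintptr_t Decompress(CageBase cage_base, uint32_t compressed) {
  return cage_base.base + compressed;
}

int main() {
  CageBase cage_base{0x40000000};  // derived once, outside the loop
  std::vector<uint32_t> compressed_ptrs = {0x10, 0x20, 0x30};
  for (uint32_t c : compressed_ptrs) {
    std::cout << std::hex << Decompress(cage_base, c) << "\n";
  }
  return 0;
}
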
diff --git a/chromium/v8/src/heap/third-party/heap-api.h b/chromium/v8/src/heap/third-party/heap-api.h
index 9354c7bca87..2a7409040bf 100644
--- a/chromium/v8/src/heap/third-party/heap-api.h
+++ b/chromium/v8/src/heap/third-party/heap-api.h
@@ -5,11 +5,13 @@
#ifndef V8_HEAP_THIRD_PARTY_HEAP_API_H_
#define V8_HEAP_THIRD_PARTY_HEAP_API_H_
-#include "include/v8.h"
#include "src/base/address-region.h"
#include "src/heap/heap.h"
namespace v8 {
+
+class Isolate;
+
namespace internal {
namespace third_party_heap {
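
This header swaps the broad include/v8.h include for a forward declaration of v8::Isolate, which is enough because the header only refers to Isolate through pointers. A self-contained sketch of the same idea, using made-up names:

// Sketch: a forward declaration suffices wherever a type is only used through
// pointers or references; the full definition is only needed where members
// are actually accessed (normally the .cc file).
#include <iostream>

namespace v8 {
class Isolate;  // forward declaration instead of a heavy #include

namespace third_party_heap {
class Heap {
 public:
  explicit Heap(Isolate* isolate) : isolate_(isolate) {}  // pointer use only
  Isolate* isolate() const { return isolate_; }

 private:
  Isolate* isolate_;
};
}  // namespace third_party_heap

// Stand-in for the definition that a .cc file would pull in via its include.
class Isolate {
 public:
  const char* name() const { return "main isolate"; }
};
}  // namespace v8

int main() {
  v8::Isolate isolate;
  v8::third_party_heap::Heap heap(&isolate);
  std::cout << heap.isolate()->name() << "\n";
  return 0;
}
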
diff --git a/chromium/v8/src/heap/weak-object-worklists.cc b/chromium/v8/src/heap/weak-object-worklists.cc
index 8a36c3aef87..50e268ab91f 100644
--- a/chromium/v8/src/heap/weak-object-worklists.cc
+++ b/chromium/v8/src/heap/weak-object-worklists.cc
@@ -25,11 +25,13 @@ void WeakObjects::UpdateAfterScavenge() {
#undef INVOKE_UPDATE
}
+// static
void WeakObjects::UpdateTransitionArrays(
WeakObjectWorklist<TransitionArray>& transition_arrays) {
DCHECK(!ContainsYoungObjects(transition_arrays));
}
+// static
void WeakObjects::UpdateEphemeronHashTables(
WeakObjectWorklist<EphemeronHashTable>& ephemeron_hash_tables) {
ephemeron_hash_tables.Update(
@@ -61,21 +63,25 @@ bool EphemeronUpdater(Ephemeron slot_in, Ephemeron* slot_out) {
}
} // anonymous namespace
+// static
void WeakObjects::UpdateCurrentEphemerons(
WeakObjectWorklist<Ephemeron>& current_ephemerons) {
current_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateNextEphemerons(
WeakObjectWorklist<Ephemeron>& next_ephemerons) {
next_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateDiscoveredEphemerons(
WeakObjectWorklist<Ephemeron>& discovered_ephemerons) {
discovered_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateWeakReferences(
WeakObjectWorklist<HeapObjectAndSlot>& weak_references) {
weak_references.Update(
@@ -96,6 +102,7 @@ void WeakObjects::UpdateWeakReferences(
});
}
+// static
void WeakObjects::UpdateWeakObjectsInCode(
WeakObjectWorklist<HeapObjectAndCode>& weak_objects_in_code) {
weak_objects_in_code.Update(
@@ -113,6 +120,7 @@ void WeakObjects::UpdateWeakObjectsInCode(
});
}
+// static
void WeakObjects::UpdateJSWeakRefs(
WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
js_weak_refs.Update(
@@ -128,16 +136,19 @@ void WeakObjects::UpdateJSWeakRefs(
});
}
+// static
void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
// TODO(syg, marja): Support WeakCells in the young generation.
DCHECK(!ContainsYoungObjects(weak_cells));
}
+// static
void WeakObjects::UpdateCodeFlushingCandidates(
WeakObjectWorklist<SharedFunctionInfo>& code_flushing_candidates) {
DCHECK(!ContainsYoungObjects(code_flushing_candidates));
}
+// static
void WeakObjects::UpdateFlushedJSFunctions(
WeakObjectWorklist<JSFunction>& flushed_js_functions) {
flushed_js_functions.Update(
@@ -153,6 +164,7 @@ void WeakObjects::UpdateFlushedJSFunctions(
});
}
+// static
void WeakObjects::UpdateBaselineFlushingCandidates(
WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
baseline_flush_candidates.Update(
@@ -169,6 +181,7 @@ void WeakObjects::UpdateBaselineFlushingCandidates(
}
#ifdef DEBUG
+// static
template <typename Type>
bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
bool result = false;
diff --git a/chromium/v8/src/heap/weak-object-worklists.h b/chromium/v8/src/heap/weak-object-worklists.h
index 60e698e0a75..c61b15a0e91 100644
--- a/chromium/v8/src/heap/weak-object-worklists.h
+++ b/chromium/v8/src/heap/weak-object-worklists.h
@@ -74,13 +74,13 @@ class WeakObjects {
private:
#define DECLARE_UPDATE_METHODS(Type, _, Name) \
- void Update##Name(WeakObjectWorklist<Type>&);
+ static void Update##Name(WeakObjectWorklist<Type>&);
WEAK_OBJECT_WORKLISTS(DECLARE_UPDATE_METHODS)
#undef DECLARE_UPDATE_METHODS
#ifdef DEBUG
template <typename Type>
- bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
+ static bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
#endif
};
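
The header change above turns the Update* and ContainsYoungObjects helpers into static member functions, and the .cc change adds the matching "// static" comments that V8/Chromium style places above each out-of-class definition. A small sketch of that convention with a simplified worklist:

// Sketch: a member function that never reads instance state becomes static;
// the "// static" comment is repeated above the out-of-class definition.
#include <algorithm>
#include <iostream>
#include <vector>

class WeakObjectsSketch {
 public:
  // Declared static in the header: it only operates on its argument.
  static void UpdateWorklist(std::vector<int>& worklist);
};

// static
void WeakObjectsSketch::UpdateWorklist(std::vector<int>& worklist) {
  // Drop "dead" (negative) entries; no instance is required for this.
  worklist.erase(std::remove_if(worklist.begin(), worklist.end(),
                                [](int v) { return v < 0; }),
                 worklist.end());
}

int main() {
  std::vector<int> worklist = {1, -2, 3, -4};
  WeakObjectsSketch::UpdateWorklist(worklist);  // no instance needed
  for (int v : worklist) std::cout << v << ' ';
  std::cout << "\n";  // prints: 1 3
  return 0;
}
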
diff --git a/chromium/v8/src/ic/OWNERS b/chromium/v8/src/ic/OWNERS
index 3c99566e981..369dfdf31b3 100644
--- a/chromium/v8/src/ic/OWNERS
+++ b/chromium/v8/src/ic/OWNERS
@@ -1,5 +1,4 @@
ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
verwaest@chromium.org
diff --git a/chromium/v8/src/ic/accessor-assembler.cc b/chromium/v8/src/ic/accessor-assembler.cc
index f27e3b7f590..505849b9b6c 100644
--- a/chromium/v8/src/ic/accessor-assembler.cc
+++ b/chromium/v8/src/ic/accessor-assembler.cc
@@ -28,6 +28,11 @@ namespace internal {
//////////////////// Private helpers.
+#define LOAD_KIND(kind) \
+ IntPtrConstant(static_cast<intptr_t>(LoadHandler::Kind::kind))
+#define STORE_KIND(kind) \
+ Int32Constant(static_cast<intptr_t>(StoreHandler::Kind::kind))
+
// Loads dataX field from the DataHandler object.
TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
TNode<DataHandler> handler, int data_index) {
@@ -35,7 +40,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
TNode<Map> handler_map = LoadMap(handler);
TNode<Uint16T> instance_type = LoadMapInstanceType(handler_map);
#endif
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
InstanceTypeEqual(instance_type, STORE_HANDLER_TYPE)));
int offset = 0;
@@ -57,7 +62,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
UNREACHABLE();
}
USE(minimum_size);
- CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(
LoadMapInstanceSizeInWords(handler_map),
IntPtrConstant(minimum_size / kTaggedSize)));
return LoadMaybeWeakObjectField(handler, offset);
@@ -106,7 +111,7 @@ void AccessorAssembler::HandlePolymorphicCase(
// Load the {feedback} array length.
TNode<IntPtrT> length = LoadAndUntagWeakFixedArrayLength(feedback);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
// This is a hand-crafted loop that iterates backwards and only compares
// against zero at the end, since we already know that we will have at least a
@@ -118,7 +123,7 @@ void AccessorAssembler::HandlePolymorphicCase(
{
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, var_index.value());
- CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_cached_map));
GotoIfNot(IsWeakReferenceTo(maybe_cached_map, lookup_start_object_map),
&loop_next);
@@ -150,7 +155,7 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
LoadMapBitField(lookup_start_object_map)),
miss);
- CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ CSA_DCHECK(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
MegaDOMSymbolConstant()));
// In some cases, we load the
@@ -160,7 +165,7 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
} else {
TNode<MaybeObject> maybe_handler =
LoadFeedbackVectorSlot(CAST(vector), slot, kTaggedSize);
- CSA_ASSERT(this, IsStrong(maybe_handler));
+ CSA_DCHECK(this, IsStrong(maybe_handler));
handler = CAST(maybe_handler);
}
@@ -169,13 +174,13 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
// Load the getter
TNode<MaybeObject> maybe_getter = LoadMegaDomHandlerAccessor(handler);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_getter));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_getter));
TNode<FunctionTemplateInfo> getter =
CAST(GetHeapObjectAssumeWeak(maybe_getter, miss));
// Load the accessor context
TNode<MaybeObject> maybe_context = LoadMegaDomHandlerContext(handler);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
TNode<Context> context = CAST(GetHeapObjectAssumeWeak(maybe_context, miss));
// TODO(gsathya): This builtin throws an exception on interface check fail but
@@ -255,7 +260,7 @@ void AccessorAssembler::HandleLoadAccessor(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
CSA_CHECK(this, IsNotCleared(maybe_context));
TNode<HeapObject> context = GetHeapObjectAssumeWeak(maybe_context);
@@ -267,13 +272,10 @@ void AccessorAssembler::HandleLoadAccessor(
TVARIABLE(HeapObject, api_holder, CAST(p->lookup_start_object()));
Label load(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &load);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &load);
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+ CSA_DCHECK(this,
+ WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)));
api_holder = LoadMapPrototype(LoadMap(CAST(p->lookup_start_object())));
Goto(&load);
@@ -475,25 +477,21 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if_oob(this, Label::kDeferred), try_string_to_array_index(this),
emit_element_load(this);
TVARIABLE(IntPtrT, var_intptr_index);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
- &if_element);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kElement)), &if_element);
if (access_mode == LoadAccessMode::kHas) {
- CSA_ASSERT(this,
- WordNotEqual(handler_kind,
- IntPtrConstant(LoadHandler::kIndexedString)));
+ CSA_DCHECK(this, WordNotEqual(handler_kind, LOAD_KIND(kIndexedString)));
Goto(&if_property);
} else {
- Branch(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kIndexedString)),
- &if_indexed_string, &if_property);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kIndexedString)),
+ &if_indexed_string, &if_property);
}
BIND(&if_element);
{
Comment("element_load");
// TODO(ishell): implement
- CSA_ASSERT(this, IsClearWord<LoadHandler::IsWasmArrayBits>(handler_word));
+ CSA_DCHECK(this, IsClearWord<LoadHandler::IsWasmArrayBits>(handler_word));
TVARIABLE(Int32T, var_instance_type);
TNode<IntPtrT> intptr_index = TryToIntptr(
p->name(), &try_string_to_array_index, &var_instance_type);
@@ -510,7 +508,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
CallCFunction(function, MachineType::Int32(),
std::make_pair(MachineType::AnyTagged(), p->name())));
GotoIf(Word32Equal(Int32Constant(-1), result), miss);
- CSA_ASSERT(this, Int32GreaterThanOrEqual(result, Int32Constant(0)));
+ CSA_DCHECK(this, Int32GreaterThanOrEqual(result, Int32Constant(0)));
var_intptr_index = ChangeInt32ToIntPtr(result);
Goto(&emit_element_load);
@@ -588,19 +586,19 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if (access_mode != LoadAccessMode::kHas) {
BIND(&if_indexed_string);
{
- Label if_oob(this, Label::kDeferred);
+ Label if_oob_string(this, Label::kDeferred);
Comment("indexed string");
TNode<String> string_holder = CAST(holder);
TNode<UintPtrT> index = Unsigned(TryToIntptr(p->name(), miss));
TNode<UintPtrT> length =
Unsigned(LoadStringLengthAsWord(string_holder));
- GotoIf(UintPtrGreaterThanOrEqual(index, length), &if_oob);
+ GotoIf(UintPtrGreaterThanOrEqual(index, length), &if_oob_string);
TNode<Int32T> code = StringCharCodeAt(string_holder, index);
TNode<String> result = StringFromSingleCharCode(code);
Return(result);
- BIND(&if_oob);
+ BIND(&if_oob_string);
TNode<BoolT> allow_out_of_bounds =
IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
GotoIfNot(allow_out_of_bounds, miss);
@@ -637,41 +635,32 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
native_data_property(this, Label::kDeferred),
api_getter(this, Label::kDeferred);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kField)), &field);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kConstantFromPrototype)),
- &constant);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kConstantFromPrototype)), &constant);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
- &nonexistent);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNonExistent)), &nonexistent);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNormal)),
- &normal);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNormal)), &normal);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
- &accessor);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kAccessor)), &accessor);
- GotoIf(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
- &native_data_property);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNativeDataProperty)),
+ &native_data_property);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &api_getter);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &api_getter);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)),
&api_getter);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)),
- &global);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kGlobal)), &global);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kSlow)), &slow);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)), &proxy);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kProxy)), &proxy);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kModuleExport)),
- &module_export, &interceptor);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kModuleExport)), &module_export,
+ &interceptor);
BIND(&field);
{
@@ -680,7 +669,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
GotoIf(IsSetWord<LoadHandler::IsWasmStructBits>(handler_word),
&is_wasm_field);
#else
- CSA_ASSERT(this, IsClearWord<LoadHandler::IsWasmStructBits>(handler_word));
+ CSA_DCHECK(this, IsClearWord<LoadHandler::IsWasmStructBits>(handler_word));
#endif // V8_ENABLE_WEBASSEMBLY
HandleLoadField(CAST(holder), handler_word, var_double_value, rebox_double,
@@ -740,7 +729,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
CAST(LoadDescriptorValue(LoadMap(CAST(holder)), descriptor));
TNode<Object> getter =
LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(getter)));
exit_point->Return(Call(p->context(), getter, p->receiver()));
}
@@ -764,7 +753,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
// handling with proxies which is currently not supported by builtins. So
// for such cases, we should install a slow path and never reach here. Fix
// it to not generate this for LoadGlobals.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(IntPtrConstant(static_cast<int>(on_nonexistent)),
IntPtrConstant(static_cast<int>(
OnNonExistent::kThrowReferenceError))));
@@ -807,7 +796,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&global);
{
- CSA_ASSERT(this, IsPropertyCell(CAST(holder)));
+ CSA_DCHECK(this, IsPropertyCell(CAST(holder)));
// Ensure the property cell doesn't contain the hole.
TNode<Object> value =
LoadObjectField(CAST(holder), PropertyCell::kValueOffset);
@@ -876,37 +865,27 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
Label return_true(this), return_false(this), return_lookup(this),
normal(this), global(this), slow(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kField)), &return_true);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kConstantFromPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kConstantFromPrototype)),
&return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
- &return_false);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNonExistent)), &return_false);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNormal)),
- &normal);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNormal)), &normal);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kAccessor)), &return_true);
- GotoIf(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNativeDataProperty)), &return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &return_true);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)),
&return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kSlow)), &slow);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global,
- &return_lookup);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kGlobal)), &global, &return_lookup);
BIND(&return_true);
exit_point->Return(TrueConstant());
@@ -916,14 +895,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&return_lookup);
{
- CSA_ASSERT(
+ CSA_DCHECK(
this,
- Word32Or(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kInterceptor)),
- Word32Or(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)),
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kModuleExport)))));
+ Word32Or(WordEqual(handler_kind, LOAD_KIND(kInterceptor)),
+ Word32Or(WordEqual(handler_kind, LOAD_KIND(kProxy)),
+ WordEqual(handler_kind, LOAD_KIND(kModuleExport)))));
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtin::kHasProperty), p->context(),
p->receiver(), p->name());
@@ -945,7 +921,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&global);
{
- CSA_ASSERT(this, IsPropertyCell(CAST(holder)));
+ CSA_DCHECK(this, IsPropertyCell(CAST(holder)));
// Ensure the property cell doesn't contain the hole.
TNode<Object> value =
LoadObjectField(CAST(holder), PropertyCell::kValueOffset);
@@ -1025,7 +1001,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
int mask = ICHandler::LookupOnLookupStartObjectBits::kMask |
ICHandler::DoAccessCheckOnLookupStartObjectBits::kMask;
if (ic_mode == ICMode::kGlobalIC) {
- CSA_ASSERT(this, IsClearWord(handler_flags, mask));
+ CSA_DCHECK(this, IsClearWord(handler_flags, mask));
} else {
DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
@@ -1033,7 +1009,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
if_lookup_on_lookup_start_object(this);
GotoIf(IsClearWord(handler_flags, mask), &done);
// Only one of the bits can be set at a time.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(WordAnd(handler_flags, IntPtrConstant(mask)),
IntPtrConstant(mask)));
Branch(
@@ -1044,7 +1020,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
BIND(&if_do_access_check);
{
TNode<MaybeObject> data2 = LoadHandlerDataField(handler, 2);
- CSA_ASSERT(this, IsWeakOrCleared(data2));
+ CSA_DCHECK(this, IsWeakOrCleared(data2));
TNode<Context> expected_native_context =
CAST(GetHeapObjectAssumeWeak(data2, miss));
EmitAccessCheck(expected_native_context, p->context(),
@@ -1058,7 +1034,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
// lookup_start_object can be a JSGlobalObject) because prototype
// validity cell check already guards modifications of the global
// object.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(HasInstanceType(
CAST(p->lookup_start_object()), JS_GLOBAL_OBJECT_TYPE)));
@@ -1123,11 +1099,9 @@ void AccessorAssembler::HandleLoadICProtoHandler(
{
// If the "maybe_holder_or_constant" in the handler is a smi, then it's
// guaranteed that it's not a holder object, but a constant value.
- CSA_ASSERT(
- this,
- WordEqual(
- Signed(DecodeWord<LoadHandler::KindBits>(SmiUntag(smi_handler))),
- IntPtrConstant(LoadHandler::kConstantFromPrototype)));
+ CSA_DCHECK(this, WordEqual(Signed(DecodeWord<LoadHandler::KindBits>(
+ SmiUntag(smi_handler))),
+ LOAD_KIND(kConstantFromPrototype)));
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
@@ -1141,7 +1115,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
// the validity cell check implies that |holder| is
// alive. However, for global object receivers, |maybe_holder| may
// be cleared.
- CSA_ASSERT(this, IsWeakOrCleared(maybe_holder_or_constant));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_holder_or_constant));
TNode<HeapObject> holder =
GetHeapObjectAssumeWeak(maybe_holder_or_constant, miss);
*var_holder = holder;
@@ -1159,7 +1133,7 @@ void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context,
TNode<Context> context,
TNode<Object> receiver,
Label* can_access, Label* miss) {
- CSA_ASSERT(this, IsNativeContext(expected_native_context));
+ CSA_DCHECK(this, IsNativeContext(expected_native_context));
TNode<NativeContext> native_context = LoadNativeContext(context);
GotoIf(TaggedEqual(expected_native_context, native_context), can_access);
@@ -1181,7 +1155,7 @@ void AccessorAssembler::JumpIfDataProperty(TNode<Uint32T> details,
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
readonly);
} else {
- CSA_ASSERT(this, IsNotSetWord32(details,
+ CSA_DCHECK(this, IsNotSetWord32(details,
PropertyDetails::kAttributesReadOnlyMask));
}
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
@@ -1221,25 +1195,24 @@ void AccessorAssembler::HandleStoreICHandlerCase(
Label if_fast_smi(this), if_proxy(this), if_interceptor(this),
if_slow(this);
- STATIC_ASSERT(StoreHandler::kGlobalProxy + 1 == StoreHandler::kNormal);
- STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kInterceptor);
- STATIC_ASSERT(StoreHandler::kInterceptor + 1 == StoreHandler::kSlow);
- STATIC_ASSERT(StoreHandler::kSlow + 1 == StoreHandler::kProxy);
- STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
+#define ASSERT_CONSECUTIVE(a, b) \
+ STATIC_ASSERT(static_cast<intptr_t>(StoreHandler::Kind::a) + 1 == \
+ static_cast<intptr_t>(StoreHandler::Kind::b));
+ ASSERT_CONSECUTIVE(kGlobalProxy, kNormal)
+ ASSERT_CONSECUTIVE(kNormal, kInterceptor)
+ ASSERT_CONSECUTIVE(kInterceptor, kSlow)
+ ASSERT_CONSECUTIVE(kSlow, kProxy)
+ ASSERT_CONSECUTIVE(kProxy, kKindsNumber)
+#undef ASSERT_CONSECUTIVE
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- GotoIf(
- Int32LessThan(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
- &if_fast_smi);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)),
- &if_proxy);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)),
+ GotoIf(Int32LessThan(handler_kind, STORE_KIND(kGlobalProxy)), &if_fast_smi);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kProxy)), &if_proxy);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kInterceptor)),
&if_interceptor);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
- &if_slow);
- CSA_ASSERT(this,
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)));
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kSlow)), &if_slow);
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kNormal)));
TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(holder)));
@@ -1282,14 +1255,9 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_fast_smi);
{
- TNode<Uint32T> handler_kind =
- DecodeWord32<StoreHandler::KindBits>(handler_word);
-
Label data(this), accessor(this), native_data_property(this);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
- &accessor);
- Branch(Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kNativeDataProperty)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kAccessor)), &accessor);
+ Branch(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
&native_data_property, &data);
BIND(&accessor);
@@ -1356,7 +1324,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&store_transition_or_global);
{
// Load value or miss if the {handler} weak cell is cleared.
- CSA_ASSERT(this, IsWeakOrCleared(handler));
+ CSA_DCHECK(this, IsWeakOrCleared(handler));
TNode<HeapObject> map_or_property_cell =
GetHeapObjectAssumeWeak(handler, miss);
@@ -1391,13 +1359,13 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
}
TNode<Uint32T> bitfield3 = LoadMapBitField3(transition_map);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3));
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3));
GotoIf(IsSetWord32<Map::Bits3::IsDeprecatedBit>(bitfield3), miss);
// Load last descriptor details.
TNode<UintPtrT> nof =
DecodeWordFromWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bitfield3);
- CSA_ASSERT(this, WordNotEqual(nof, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(nof, IntPtrConstant(0)));
TNode<DescriptorArray> descriptors = LoadMapDescriptors(transition_map);
TNode<IntPtrT> factor = IntPtrConstant(DescriptorArray::kEntrySize);
@@ -1407,7 +1375,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
GotoIf(TaggedNotEqual(key, p->name()), miss);
} else {
- CSA_ASSERT(this, TaggedEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
+ CSA_DCHECK(this, TaggedEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
p->name()));
}
TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, last_key_index);
@@ -1454,7 +1422,7 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
&r_heapobject);
GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
bailout);
- CSA_ASSERT(this, Word32Equal(representation,
+ CSA_DCHECK(this, Word32Equal(representation,
Int32Constant(Representation::kTagged)));
Goto(&all_fine);
@@ -1509,12 +1477,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
bool do_transitioning_store) {
Label done(this), if_field(this), if_descriptor(this);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
Int32Constant(kData)));
- Branch(Word32Equal(DecodeWord32<PropertyDetails::LocationField>(details),
- Int32Constant(kField)),
+ Branch(Word32Equal(
+ DecodeWord32<PropertyDetails::LocationField>(details),
+ Int32Constant(static_cast<int32_t>(PropertyLocation::kField))),
&if_field, &if_descriptor);
BIND(&if_field);
@@ -1675,7 +1644,7 @@ void AccessorAssembler::CheckPrototypeValidityCell(
GotoIf(
TaggedEqual(maybe_validity_cell, SmiConstant(Map::kPrototypeChainValid)),
&done);
- CSA_ASSERT(this, TaggedIsNotSmi(maybe_validity_cell));
+ CSA_DCHECK(this, TaggedIsNotSmi(maybe_validity_cell));
TNode<Object> cell_value =
LoadObjectField(CAST(maybe_validity_cell), Cell::kValueOffset);
@@ -1693,10 +1662,10 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
TNode<HeapObject> accessor_pair =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
- CSA_ASSERT(this, IsAccessorPair(accessor_pair));
+ CSA_DCHECK(this, IsAccessorPair(accessor_pair));
TNode<Object> setter =
LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(setter)));
Return(Call(p->context(), setter, p->receiver(), p->value()));
}
@@ -1759,41 +1728,33 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Label if_add_normal(this), if_store_global_proxy(this), if_api_setter(this),
if_accessor(this), if_native_data_property(this), if_slow(this);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ CSA_DCHECK(this, TaggedIsSmi(smi_handler));
TNode<Int32T> handler_word = SmiToInt32(CAST(smi_handler));
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)),
- &if_add_normal);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kNormal)), &if_add_normal);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
- &if_slow);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kSlow)), &if_slow);
TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_holder));
TNode<HeapObject> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kGlobalProxy)),
&if_store_global_proxy);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
- &if_accessor);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kAccessor)), &if_accessor);
- GotoIf(Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kNativeDataProperty)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
&if_native_data_property);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
- &if_api_setter);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetter)), &if_api_setter);
- GotoIf(
- Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)),
- &if_api_setter);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetterHolderIsPrototype)),
+ &if_api_setter);
- CSA_ASSERT(this,
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)));
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kProxy)));
HandleStoreToProxy(p, CAST(holder), miss, support_elements);
BIND(&if_slow);
@@ -1840,7 +1801,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&if_api_setter);
{
Comment("api_setter");
- CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ CSA_DCHECK(this, TaggedIsNotSmi(handler));
TNode<CallHandlerInfo> call_handler_info = CAST(holder);
// Context is stored either in data2 or data3 field depending on whether
@@ -1851,7 +1812,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
TNode<Object> context = Select<Object>(
IsCleared(maybe_context), [=] { return SmiConstant(0); },
[=] { return GetHeapObjectAssumeWeak(maybe_context); });
@@ -1864,13 +1825,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TVARIABLE(Object, api_holder, p->receiver());
Label store(this);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
- &store);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetter)), &store);
- CSA_ASSERT(this,
- Word32Equal(
- handler_kind,
- Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)));
+ CSA_DCHECK(this, Word32Equal(handler_kind,
+ STORE_KIND(kApiSetterHolderIsPrototype)));
api_holder = LoadMapPrototype(LoadMap(CAST(p->receiver())));
Goto(&store);
@@ -1935,11 +1893,9 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(TNode<Word32T> handler_word,
#ifdef DEBUG
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- CSA_ASSERT(
- this,
- Word32Or(
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kField)),
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kConstField))));
+ CSA_DCHECK(this,
+ Word32Or(Word32Equal(handler_kind, STORE_KIND(kField)),
+ Word32Equal(handler_kind, STORE_KIND(kConstField))));
#endif
TNode<Uint32T> field_representation =
@@ -1984,7 +1940,7 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(TNode<Word32T> handler_word,
BIND(&if_double_field);
{
- CSA_ASSERT(this, Word32Equal(field_representation,
+ CSA_DCHECK(this, Word32Equal(field_representation,
Int32Constant(Representation::kDouble)));
Comment("double field checks");
TNode<Float64T> double_value = TryTaggedToFloat64(value, miss);
@@ -2005,7 +1961,7 @@ void AccessorAssembler::CheckHeapObjectTypeMatchesDescriptor(
// Skip field type check in favor of constant value check when storing
// to constant field.
GotoIf(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
- Int32Constant(StoreHandler::kConstField)),
+ STORE_KIND(kConstField)),
&done);
TNode<IntPtrT> descriptor =
Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
@@ -2048,8 +2004,6 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value,
base::Optional<TNode<Float64T>> double_value, Representation representation,
Label* miss) {
- Label done(this);
-
bool store_value_as_double = representation.IsDouble();
TNode<BoolT> is_inobject =
@@ -2072,7 +2026,7 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
// Store the double value directly into the mutable HeapNumber.
TNode<Object> field = LoadObjectField(property_storage, offset);
- CSA_ASSERT(this, IsHeapNumber(CAST(field)));
+ CSA_DCHECK(this, IsHeapNumber(CAST(field)));
actual_property_storage = CAST(field);
actual_offset = IntPtrConstant(HeapNumber::kValueOffset);
Goto(&property_and_offset_ready);
@@ -2085,7 +2039,7 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
// Do constant value check if necessary.
Label do_store(this);
GotoIfNot(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
- Int32Constant(StoreHandler::kConstField)),
+ STORE_KIND(kConstField)),
&do_store);
{
if (store_value_as_double) {
@@ -2172,7 +2126,7 @@ TNode<PropertyArray> AccessorAssembler::ExtendPropertiesBackingStore(
FixedArrayBase::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
- CSA_ASSERT(this, IntPtrLessThan(new_capacity,
+ CSA_DCHECK(this, IntPtrLessThan(new_capacity,
IntPtrConstant(kMaxNumberOfDescriptors +
JSObject::kFieldsAdded)));
@@ -2624,56 +2578,58 @@ void AccessorAssembler::GenericPropertyLoad(
GotoIf(IsSetWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3),
&if_property_dictionary);
- // Try looking up the property on the lookup_start_object; if unsuccessful,
- // look for a handler in the stub cache.
- TNode<DescriptorArray> descriptors =
- LoadMapDescriptors(lookup_start_object_map);
+ {
+ // Try looking up the property on the lookup_start_object; if unsuccessful,
+ // look for a handler in the stub cache.
+ TNode<DescriptorArray> descriptors =
+ LoadMapDescriptors(lookup_start_object_map);
- Label if_descriptor_found(this), try_stub_cache(this);
- TVARIABLE(IntPtrT, var_name_index);
- Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
- : &lookup_prototype_chain;
- DescriptorLookup(name, descriptors, bitfield3, &if_descriptor_found,
- &var_name_index, notfound);
+ Label if_descriptor_found(this), try_stub_cache(this);
+ TVARIABLE(IntPtrT, var_name_index);
+ Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
+ : &lookup_prototype_chain;
+ DescriptorLookup(name, descriptors, bitfield3, &if_descriptor_found,
+ &var_name_index, notfound);
- BIND(&if_descriptor_found);
- {
- LoadPropertyFromFastObject(lookup_start_object, lookup_start_object_map,
- descriptors, var_name_index.value(),
- &var_details, &var_value);
- Goto(&if_found_on_lookup_start_object);
- }
-
- if (use_stub_cache == kUseStubCache) {
- DCHECK_EQ(lookup_start_object, p->receiver_and_lookup_start_object());
- Label stub_cache(this);
- BIND(&try_stub_cache);
- // When there is no feedback vector don't use stub cache.
- GotoIfNot(IsUndefined(p->vector()), &stub_cache);
- // Fall back to the slow path for private symbols.
- Branch(IsPrivateSymbol(name), slow, &lookup_prototype_chain);
-
- BIND(&stub_cache);
- Comment("stub cache probe for fast property load");
- TVARIABLE(MaybeObject, var_handler);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), lookup_start_object, name,
- &found_handler, &var_handler, &stub_cache_miss);
- BIND(&found_handler);
+ BIND(&if_descriptor_found);
{
- LazyLoadICParameters lazy_p(p);
- HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
- &stub_cache_miss, &direct_exit);
+ LoadPropertyFromFastObject(lookup_start_object, lookup_start_object_map,
+ descriptors, var_name_index.value(),
+ &var_details, &var_value);
+ Goto(&if_found_on_lookup_start_object);
}
- BIND(&stub_cache_miss);
- {
- // TODO(jkummerow): Check if the property exists on the prototype
- // chain. If it doesn't, then there's no point in missing.
- Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(),
- p->receiver_and_lookup_start_object(), name, p->slot(),
- p->vector());
+ if (use_stub_cache == kUseStubCache) {
+ DCHECK_EQ(lookup_start_object, p->receiver_and_lookup_start_object());
+ Label stub_cache(this);
+ BIND(&try_stub_cache);
+ // When there is no feedback vector don't use stub cache.
+ GotoIfNot(IsUndefined(p->vector()), &stub_cache);
+ // Fall back to the slow path for private symbols.
+ Branch(IsPrivateSymbol(name), slow, &lookup_prototype_chain);
+
+ BIND(&stub_cache);
+ Comment("stub cache probe for fast property load");
+ TVARIABLE(MaybeObject, var_handler);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->load_stub_cache(), lookup_start_object, name,
+ &found_handler, &var_handler, &stub_cache_miss);
+ BIND(&found_handler);
+ {
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
+ &stub_cache_miss, &direct_exit);
+ }
+
+ BIND(&stub_cache_miss);
+ {
+ // TODO(jkummerow): Check if the property exists on the prototype
+ // chain. If it doesn't, then there's no point in missing.
+ Comment("KeyedLoadGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(),
+ p->receiver_and_lookup_start_object(), name, p->slot(),
+ p->vector());
+ }
}
}
@@ -2750,7 +2706,7 @@ void AccessorAssembler::GenericPropertyLoad(
BIND(&is_private_symbol);
{
- CSA_ASSERT(this, IsPrivateSymbol(name));
+ CSA_DCHECK(this, IsPrivateSymbol(name));
// For private names that don't exist on the receiver, we bail
// to the runtime to throw. For private symbols, we just return
@@ -2791,7 +2747,7 @@ TNode<IntPtrT> AccessorAssembler::StubCachePrimaryOffset(TNode<Name> name,
TNode<Map> map) {
// Compute the hash of the name (use entire hash field).
TNode<Uint32T> raw_hash_field = LoadNameRawHashField(name);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(Word32And(raw_hash_field,
Int32Constant(Name::kHashNotComputedMask)),
Int32Constant(0)));
@@ -3100,9 +3056,9 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
ExitPoint* exit_point) {
// Neither deprecated map nor monomorphic. These cases are handled in the
// bytecode handler.
- CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
- CSA_ASSERT(this, TaggedNotEqual(lookup_start_object_map, feedback));
- CSA_ASSERT(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
+ CSA_DCHECK(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
+ CSA_DCHECK(this, TaggedNotEqual(lookup_start_object_map, feedback));
+ CSA_DCHECK(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
{
@@ -3240,7 +3196,7 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_property_cell);
{
// Load value or try handler case if the weak reference is cleared.
- CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, try_handler));
TNode<Object> value =
@@ -3420,16 +3376,18 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
// slot.
Comment("KeyedLoadIC_try_polymorphic_name");
TVARIABLE(Name, var_name);
- TVARIABLE(IntPtrT, var_index);
Label if_polymorphic_name(this), feedback_matches(this),
if_internalized(this), if_notinternalized(this, Label::kDeferred);
// Fast-case: The recorded {feedback} matches the {name}.
GotoIf(TaggedEqual(strong_feedback, p->name()), &feedback_matches);
- // Try to internalize the {name} if it isn't already.
- TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name, &miss,
- &if_notinternalized);
+ {
+ // Try to internalize the {name} if it isn't already.
+ TVARIABLE(IntPtrT, var_index);
+ TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name,
+ &miss, &if_notinternalized);
+ }
BIND(&if_internalized);
{
@@ -3572,8 +3530,8 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
// When we get here, we know that the {name} matches the recorded
// feedback name in the {vector} and can safely be used for the
// LoadIC handler logic below.
- CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
- CSA_ASSERT(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)),
+ CSA_DCHECK(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
+ CSA_DCHECK(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)),
name, vector);
// Check if we have a matching handler for the {lookup_start_object_map}.
@@ -3674,7 +3632,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
{
Label try_handler(this), miss(this, Label::kDeferred);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
@@ -3741,7 +3699,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
property_cell, PropertyCell::kPropertyDetailsRawOffset);
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
Int32Constant(kData)));
@@ -3753,12 +3711,12 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
GotoIf(Word32Equal(type, Int32Constant(
static_cast<int>(PropertyCellType::kConstant))),
&constant);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(cell_contents)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(cell_contents)));
GotoIf(Word32Equal(
type, Int32Constant(static_cast<int>(PropertyCellType::kMutable))),
&store);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32Equal(type, Int32Constant(static_cast<int>(
PropertyCellType::kConstantType))),
Word32Equal(type, Int32Constant(static_cast<int>(
@@ -3787,7 +3745,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
{
// Since |value| is never the hole, the equality check below also handles an
// invalidated property cell correctly.
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(value)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(value)));
GotoIfNot(TaggedEqual(cell_contents, value), miss);
exit_point->Return(value);
}
@@ -3929,8 +3887,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<Int32T> handler_word = SmiToInt32(CAST(var_handler.value()));
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- CSA_ASSERT(this, Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kSlow)));
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kSlow)));
#endif
Comment("StoreInArrayLiteralIC_Slow");
@@ -3952,7 +3909,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
BIND(&try_megamorphic);
{
Comment("StoreInArrayLiteralIC_try_megamorphic");
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
@@ -4007,7 +3964,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
- CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ CSA_DCHECK(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
MegamorphicSymbolConstant()));
TryProbeStubCache(isolate()->load_stub_cache(), receiver, CAST(name),
@@ -4582,15 +4539,15 @@ void AccessorAssembler::GenerateCloneObjectIC() {
Label allocate_object(this);
GotoIf(IsNullOrUndefined(source), &allocate_object);
- CSA_SLOW_ASSERT(this, IsJSObjectMap(source_map));
- CSA_SLOW_ASSERT(this, IsJSObjectMap(result_map));
+ CSA_SLOW_DCHECK(this, IsJSObjectMap(source_map));
+ CSA_SLOW_DCHECK(this, IsJSObjectMap(result_map));
// The IC fast case should only be taken if the result map a compatible
// elements kind with the source object.
TNode<FixedArrayBase> source_elements = LoadElements(CAST(source));
- auto flags = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
- var_elements = CAST(CloneFixedArray(source_elements, flags));
+ auto flag = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
+ var_elements = CAST(CloneFixedArray(source_elements, flag));
// Copy the PropertyArray backing store. The source PropertyArray must be
// either an Smi, or a PropertyArray.
@@ -4668,7 +4625,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
BIND(&try_megamorphic);
{
Comment("CloneObjectIC_try_megamorphic");
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
@@ -4690,7 +4647,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
slot, maybe_vector));
var_handler = UncheckedCast<MaybeObject>(map_or_result);
GotoIf(IsMap(map_or_result), &if_handler);
- CSA_ASSERT(this, IsJSObject(map_or_result));
+ CSA_DCHECK(this, IsJSObject(map_or_result));
Return(map_or_result);
}
}
@@ -4793,5 +4750,8 @@ void AccessorAssembler::BranchIfPrototypesHaveNoElements(
}
}
+#undef LOAD_KIND
+#undef STORE_KIND
+
} // namespace internal
} // namespace v8
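
Two mechanical themes run through the accessor-assembler.cc diff: CSA_ASSERT becomes CSA_DCHECK, and the repeated IntPtrConstant(LoadHandler::kXyz) / Int32Constant(StoreHandler::kXyz) spellings are folded into the file-local LOAD_KIND / STORE_KIND macros defined at the top of the file and #undef'd here at the bottom. A compact sketch of that file-local-macro shape, with hypothetical names:

// Sketch: a convenience macro that shortens repeated casts of a scoped enum,
// defined near the top of a .cc file and #undef'd at the end so it cannot
// leak into other translation units.
#include <cstdint>
#include <iostream>

enum class Kind : int32_t { kField, kAccessor, kSlow };

#define KIND(kind) static_cast<int32_t>(Kind::kind)

void Dispatch(int32_t handler_kind) {
  if (handler_kind == KIND(kField)) {
    std::cout << "field load\n";
  } else if (handler_kind == KIND(kAccessor)) {
    std::cout << "accessor load\n";
  } else {
    std::cout << "slow path\n";
  }
}

#undef KIND  // keep the shorthand strictly local to this file

int main() {
  Dispatch(static_cast<int32_t>(Kind::kAccessor));
  return 0;
}
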
diff --git a/chromium/v8/src/ic/handler-configuration-inl.h b/chromium/v8/src/ic/handler-configuration-inl.h
index 285c266b809..081229c4439 100644
--- a/chromium/v8/src/ic/handler-configuration-inl.h
+++ b/chromium/v8/src/ic/handler-configuration-inl.h
@@ -39,27 +39,27 @@ LoadHandler::Kind LoadHandler::GetHandlerKind(Smi smi_handler) {
}
Handle<Smi> LoadHandler::LoadNormal(Isolate* isolate) {
- int config = KindBits::encode(kNormal);
+ int config = KindBits::encode(Kind::kNormal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadGlobal(Isolate* isolate) {
- int config = KindBits::encode(kGlobal);
+ int config = KindBits::encode(Kind::kGlobal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadInterceptor(Isolate* isolate) {
- int config = KindBits::encode(kInterceptor);
+ int config = KindBits::encode(Kind::kInterceptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadSlow(Isolate* isolate) {
- int config = KindBits::encode(kSlow);
+ int config = KindBits::encode(Kind::kSlow);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
- int config = KindBits::encode(kField) |
+ int config = KindBits::encode(Kind::kField) |
IsInobjectBits::encode(field_index.is_inobject()) |
IsDoubleBits::encode(field_index.is_double()) |
FieldIndexBits::encode(field_index.index());
@@ -68,49 +68,51 @@ Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
Handle<Smi> LoadHandler::LoadWasmStructField(Isolate* isolate,
WasmValueType type, int offset) {
- int config = KindBits::encode(kField) | IsWasmStructBits::encode(true) |
+ int config = KindBits::encode(Kind::kField) | IsWasmStructBits::encode(true) |
WasmFieldTypeBits::encode(type) |
WasmFieldOffsetBits::encode(offset);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadConstantFromPrototype(Isolate* isolate) {
- int config = KindBits::encode(kConstantFromPrototype);
+ int config = KindBits::encode(Kind::kConstantFromPrototype);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadAccessor(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
+ int config =
+ KindBits::encode(Kind::kAccessor) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadProxy(Isolate* isolate) {
- int config = KindBits::encode(kProxy);
+ int config = KindBits::encode(Kind::kProxy);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadNativeDataProperty(Isolate* isolate,
int descriptor) {
- int config = KindBits::encode(kNativeDataProperty) |
+ int config = KindBits::encode(Kind::kNativeDataProperty) |
DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadApiGetter(Isolate* isolate,
bool holder_is_receiver) {
- int config = KindBits::encode(
- holder_is_receiver ? kApiGetter : kApiGetterHolderIsPrototype);
+ int config =
+ KindBits::encode(holder_is_receiver ? Kind::kApiGetter
+ : Kind::kApiGetterHolderIsPrototype);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadModuleExport(Isolate* isolate, int index) {
int config =
- KindBits::encode(kModuleExport) | ExportsIndexBits::encode(index);
+ KindBits::encode(Kind::kModuleExport) | ExportsIndexBits::encode(index);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadNonExistent(Isolate* isolate) {
- int config = KindBits::encode(kNonExistent);
+ int config = KindBits::encode(Kind::kNonExistent);
return handle(Smi::FromInt(config), isolate);
}
@@ -120,7 +122,7 @@ Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
bool is_js_array,
KeyedAccessLoadMode load_mode) {
int config =
- KindBits::encode(kElement) |
+ KindBits::encode(Kind::kElement) |
AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) |
ElementsKindBits::encode(elements_kind) |
ConvertHoleBits::encode(convert_hole_to_undefined) |
@@ -131,15 +133,15 @@ Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
KeyedAccessLoadMode load_mode) {
int config =
- KindBits::encode(kIndexedString) |
+ KindBits::encode(Kind::kIndexedString) |
AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadWasmArrayElement(Isolate* isolate,
WasmValueType type) {
- int config = KindBits::encode(kElement) | IsWasmArrayBits::encode(true) |
- WasmArrayTypeBits::encode(type);
+ int config = KindBits::encode(Kind::kElement) |
+ IsWasmArrayBits::encode(true) | WasmArrayTypeBits::encode(type);
return handle(Smi::FromInt(config), isolate);
}
@@ -148,17 +150,17 @@ OBJECT_CONSTRUCTORS_IMPL(StoreHandler, DataHandler)
CAST_ACCESSOR(StoreHandler)
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
- int config = KindBits::encode(kGlobalProxy);
+ int config = KindBits::encode(Kind::kGlobalProxy);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
- int config = KindBits::encode(kNormal);
+ int config = KindBits::encode(Kind::kNormal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreInterceptor(Isolate* isolate) {
- int config = KindBits::encode(kInterceptor);
+ int config = KindBits::encode(Kind::kInterceptor);
return handle(Smi::FromInt(config), isolate);
}
@@ -210,8 +212,8 @@ Builtin StoreHandler::ElementsTransitionAndStoreBuiltin(
Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate,
KeyedAccessStoreMode store_mode) {
- int config =
- KindBits::encode(kSlow) | KeyedAccessStoreModeBits::encode(store_mode);
+ int config = KindBits::encode(Kind::kSlow) |
+ KeyedAccessStoreModeBits::encode(store_mode);
return handle(Smi::FromInt(config), isolate);
}
@@ -220,7 +222,7 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
}
Smi StoreHandler::StoreProxy() {
- int config = KindBits::encode(kProxy);
+ int config = KindBits::encode(Kind::kProxy);
return Smi::FromInt(config);
}
@@ -228,7 +230,7 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation) {
DCHECK(!representation.IsNone());
- DCHECK(kind == kField || kind == kConstField);
+ DCHECK(kind == Kind::kField || kind == Kind::kConstField);
int config = KindBits::encode(kind) |
IsInobjectBits::encode(field_index.is_inobject()) |
@@ -242,26 +244,29 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
PropertyConstness constness,
Representation representation) {
- Kind kind = constness == PropertyConstness::kMutable ? kField : kConstField;
+ Kind kind = constness == PropertyConstness::kMutable ? Kind::kField
+ : Kind::kConstField;
return StoreField(isolate, kind, descriptor, field_index, representation);
}
Handle<Smi> StoreHandler::StoreNativeDataProperty(Isolate* isolate,
int descriptor) {
- int config = KindBits::encode(kNativeDataProperty) |
+ int config = KindBits::encode(Kind::kNativeDataProperty) |
DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreAccessor(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
+ int config =
+ KindBits::encode(Kind::kAccessor) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreApiSetter(Isolate* isolate,
bool holder_is_receiver) {
- int config = KindBits::encode(
- holder_is_receiver ? kApiSetter : kApiSetterHolderIsPrototype);
+ int config =
+ KindBits::encode(holder_is_receiver ? Kind::kApiSetter
+ : Kind::kApiSetterHolderIsPrototype);
return handle(Smi::FromInt(config), isolate);
}
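
Every hunk in this file is fallout from the same change in handler-configuration.h below: LoadHandler::Kind and StoreHandler::Kind become scoped enums, so each enumerator now needs Kind:: qualification and no longer converts to an integer implicitly. A minimal sketch of what that conversion changes at a call site, simplified rather than V8's actual bit fields:

// Sketch: moving from a plain enum to an enum class forces qualified names
// (Kind::kNormal) and explicit casts wherever the value is packed into bits.
#include <cstdint>
#include <iostream>

class LoadHandlerSketch {
 public:
  enum class Kind : uint8_t { kElement, kNormal, kGlobal, kSlow };

  // With a plain enum, `kind` would convert to int silently; with the scoped
  // enum the cast has to be spelled out, which is the point of the change.
  static int Encode(Kind kind) { return static_cast<int>(kind) << 1; }
};

int main() {
  // Unqualified `kNormal` no longer compiles, so every call site is updated
  // to the Kind::kNormal spelling seen throughout this file.
  std::cout << LoadHandlerSketch::Encode(LoadHandlerSketch::Kind::kNormal)
            << "\n";  // prints: 2
  return 0;
}
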
diff --git a/chromium/v8/src/ic/handler-configuration.cc b/chromium/v8/src/ic/handler-configuration.cc
index 194478fc806..83793b64aee 100644
--- a/chromium/v8/src/ic/handler-configuration.cc
+++ b/chromium/v8/src/ic/handler-configuration.cc
@@ -172,7 +172,7 @@ KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject handler) {
if (handler->IsSmi()) {
int const raw_handler = handler.ToSmi().value();
Kind const kind = KindBits::decode(raw_handler);
- if ((kind == kElement || kind == kIndexedString) &&
+ if ((kind == Kind::kElement || kind == Kind::kIndexedString) &&
AllowOutOfBoundsBits::decode(raw_handler)) {
return LOAD_IGNORE_OUT_OF_BOUNDS;
}
@@ -191,7 +191,7 @@ KeyedAccessStoreMode StoreHandler::GetKeyedAccessStoreMode(
// KeyedAccessStoreMode, compute it using KeyedAccessStoreModeForBuiltin
// method. Hence if any other Handler get to this path, just return
// STANDARD_STORE.
- if (kind != kSlow) {
+ if (kind != Kind::kSlow) {
return STANDARD_STORE;
}
KeyedAccessStoreMode store_mode =
@@ -251,8 +251,8 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
DCHECK(!transition_map->IsJSGlobalObjectMap());
Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(0);
// Store normal with enabled lookup on receiver.
- int config =
- KindBits::encode(kNormal) | LookupOnLookupStartObjectBits::encode(true);
+ int config = KindBits::encode(Kind::kNormal) |
+ LookupOnLookupStartObjectBits::encode(true);
handler->set_smi_handler(Smi::FromInt(config));
handler->set_validity_cell(*validity_cell);
return MaybeObjectHandle(handler);
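
The store and load handlers above pack a Kind (and, for Kind::kSlow store handlers, a KeyedAccessStoreMode) into bit fields of a single Smi-sized integer, which is why GetKeyedAccessStoreMode only decodes a store mode when the kind is kSlow. The following self-contained sketch mirrors that encode/decode pattern; the enum values and field widths are illustrative assumptions, not V8's actual KindBits/KeyedAccessStoreModeBits layout.

// Minimal sketch of packing a handler kind plus store mode into one integer.
#include <cassert>
#include <cstdint>

enum class Kind : uint32_t { kField, kConstField, kSlow, kProxy };
enum class StoreMode : uint32_t { kStandard, kGrowAndHandleCOW };

constexpr uint32_t kKindWidth = 4;  // assumed width, not V8's real value
constexpr uint32_t EncodeKind(Kind k) { return static_cast<uint32_t>(k); }
constexpr uint32_t EncodeMode(StoreMode m) {
  return static_cast<uint32_t>(m) << kKindWidth;
}
constexpr Kind DecodeKind(uint32_t config) {
  return static_cast<Kind>(config & ((1u << kKindWidth) - 1));
}
constexpr StoreMode DecodeMode(uint32_t config) {
  return static_cast<StoreMode>(config >> kKindWidth);
}

int main() {
  uint32_t config =
      EncodeKind(Kind::kSlow) | EncodeMode(StoreMode::kGrowAndHandleCOW);
  // Mirrors GetKeyedAccessStoreMode: the mode is only meaningful for kSlow.
  if (DecodeKind(config) == Kind::kSlow) {
    assert(DecodeMode(config) == StoreMode::kGrowAndHandleCOW);
  }
  return 0;
}
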
diff --git a/chromium/v8/src/ic/handler-configuration.h b/chromium/v8/src/ic/handler-configuration.h
index 2fc200f93ef..728f1f575a4 100644
--- a/chromium/v8/src/ic/handler-configuration.h
+++ b/chromium/v8/src/ic/handler-configuration.h
@@ -48,7 +48,7 @@ class LoadHandler final : public DataHandler {
DECL_PRINTER(LoadHandler)
DECL_VERIFIER(LoadHandler)
- enum Kind {
+ enum class Kind {
kElement,
kIndexedString,
kNormal,
@@ -245,7 +245,7 @@ class StoreHandler final : public DataHandler {
DECL_PRINTER(StoreHandler)
DECL_VERIFIER(StoreHandler)
- enum Kind {
+ enum class Kind {
kField,
kConstField,
kAccessor,
diff --git a/chromium/v8/src/ic/ic.cc b/chromium/v8/src/ic/ic.cc
index 68eee92cef8..b2e3b6fe25e 100644
--- a/chromium/v8/src/ic/ic.cc
+++ b/chromium/v8/src/ic/ic.cc
@@ -142,13 +142,19 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.type += type;
int code_offset = 0;
+ AbstractCode code = function.abstract_code(isolate_);
if (function.ActiveTierIsIgnition()) {
code_offset = InterpretedFrame::GetBytecodeOffset(frame->fp());
+ } else if (function.ActiveTierIsBaseline()) {
+ // TODO(pthier): AbstractCode should fully support Baseline code.
+ BaselineFrame* baseline_frame = BaselineFrame::cast(frame);
+ code_offset = baseline_frame->GetBytecodeOffset();
+ code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
code_offset = static_cast<int>(frame->pc() - function.code_entry_point());
}
- JavaScriptFrame::CollectFunctionAndOffsetForICStats(
- function, function.abstract_code(isolate_), code_offset);
+ JavaScriptFrame::CollectFunctionAndOffsetForICStats(function, code,
+ code_offset);
// Reserve enough space for IC transition state, the longest length is 17.
ic_info.state.reserve(17);
@@ -973,11 +979,11 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// Use simple field loads for some well-known callback properties.
// The method will only return true for absolute truths based on the
// lookup start object maps.
- FieldIndex index;
+ FieldIndex field_index;
if (Accessors::IsJSObjectFieldAccessor(isolate(), map, lookup->name(),
- &index)) {
+ &field_index)) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
- return LoadHandler::LoadField(isolate(), index);
+ return LoadHandler::LoadField(isolate(), field_index);
}
if (holder->IsJSModuleNamespace()) {
Handle<ObjectHashTable> exports(
@@ -988,9 +994,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Smi::ToInt(lookup->name()->GetHash()));
// We found the accessor, so the entry must exist.
DCHECK(entry.is_found());
- int index = ObjectHashTable::EntryToValueIndex(entry);
+ int value_index = ObjectHashTable::EntryToValueIndex(entry);
Handle<Smi> smi_handler =
- LoadHandler::LoadModuleExport(isolate(), index);
+ LoadHandler::LoadModuleExport(isolate(), value_index);
if (holder_is_lookup_start_object) {
return smi_handler;
}
@@ -1134,7 +1140,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return LoadHandler::LoadSlow(isolate());
} else {
- DCHECK_EQ(kField, lookup->property_details().location());
+ DCHECK_EQ(PropertyLocation::kField,
+ lookup->property_details().location());
#if V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(holder->IsWasmObject(isolate()))) {
smi_handler =
@@ -1993,7 +2000,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
// -------------- Fields --------------
- if (lookup->property_details().location() == kField) {
+ if (lookup->property_details().location() == PropertyLocation::kField) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
int descriptor = lookup->GetFieldDescriptorIndex();
FieldIndex index = lookup->GetFieldIndex();
@@ -2010,7 +2017,8 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
// -------------- Constant properties --------------
- DCHECK_EQ(kDescriptor, lookup->property_details().location());
+ DCHECK_EQ(PropertyLocation::kDescriptor,
+ lookup->property_details().location());
set_slow_stub_reason("constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
diff --git a/chromium/v8/src/ic/keyed-store-generic.cc b/chromium/v8/src/ic/keyed-store-generic.cc
index 8218d3d5211..be54b9c1132 100644
--- a/chromium/v8/src/ic/keyed-store-generic.cc
+++ b/chromium/v8/src/ic/keyed-store-generic.cc
@@ -317,7 +317,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TNode<IntPtrT> index, TNode<Object> value, TNode<Context> context,
Label* slow, UpdateLength update_length) {
if (update_length != kDontChangeLength) {
- CSA_ASSERT(this, IsJSArrayMap(receiver_map));
+ CSA_DCHECK(this, IsJSArrayMap(receiver_map));
// Check if the length property is writable. The fast check is only
// supported for fast properties.
GotoIf(IsDictionaryMap(receiver_map), slow);
@@ -429,7 +429,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TryRewriteElements(receiver, receiver_map, elements, native_context,
PACKED_SMI_ELEMENTS, target_kind, slow);
// The elements backing store didn't change, no reload necessary.
- CSA_ASSERT(this, TaggedEqual(elements, LoadElements(receiver)));
+ CSA_DCHECK(this, TaggedEqual(elements, LoadElements(receiver)));
Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
}
@@ -760,7 +760,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<JSReceiver> receiver, TNode<Map> receiver_map,
const StoreICParameters* p, ExitPoint* exit_point, Label* slow,
Maybe<LanguageMode> maybe_language_mode) {
- CSA_ASSERT(this, IsSimpleObjectMap(receiver_map));
+ CSA_DCHECK(this, IsSimpleObjectMap(receiver_map));
// TODO(rmcilroy) Type as Struct once we use a trimmed down
// LoadAccessorFromFastObject instead of LoadPropertyFromFastObject.
TVARIABLE(Object, var_accessor_pair);
@@ -891,14 +891,13 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
GotoIf(IsJSTypedArrayMap(receiver_map), slow);
CheckForAssociatedProtector(name, slow);
Label extensible(this), is_private_symbol(this);
- TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
GotoIf(IsPrivateSymbol(name), &is_private_symbol);
Branch(IsSetWord32<Map::Bits3::IsExtensibleBit>(bitfield3), &extensible,
slow);
BIND(&is_private_symbol);
{
- CSA_ASSERT(this, IsPrivateSymbol(name));
+ CSA_DCHECK(this, IsPrivateSymbol(name));
// For private names, we miss to the runtime which will throw.
// For private symbols, we extend and store an own property.
Branch(IsPrivateName(CAST(name)), slow, &extensible);
@@ -931,7 +930,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label not_callable(this);
TNode<Struct> accessor_pair = CAST(var_accessor_pair.value());
GotoIf(IsAccessorInfo(accessor_pair), slow);
- CSA_ASSERT(this, IsAccessorPair(accessor_pair));
+ CSA_DCHECK(this, IsAccessorPair(accessor_pair));
TNode<HeapObject> setter =
CAST(LoadObjectField(accessor_pair, AccessorPair::kSetterOffset));
TNode<Map> setter_map = LoadMap(setter);
@@ -1112,7 +1111,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
Label done(this), slow(this, Label::kDeferred);
ExitPoint exit_point(this, [&](TNode<Object> result) { Goto(&done); });
- CSA_ASSERT(this, Word32Equal(is_simple_receiver,
+ CSA_DCHECK(this, Word32Equal(is_simple_receiver,
IsSimpleObjectMap(LoadMap(receiver))));
GotoIfNot(is_simple_receiver, &slow);
diff --git a/chromium/v8/src/ic/unary-op-assembler.cc b/chromium/v8/src/ic/unary-op-assembler.cc
index 97ce0cf48d2..fb5ab7f4221 100644
--- a/chromium/v8/src/ic/unary-op-assembler.cc
+++ b/chromium/v8/src/ic/unary-op-assembler.cc
@@ -145,7 +145,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
Label if_smi(this), if_heapnumber(this), if_oddball(this);
Label if_bigint(this, Label::kDeferred);
Label if_other(this, Label::kDeferred);
- TNode<Object> value = var_value.value();
+ value = var_value.value();
GotoIf(TaggedIsSmi(value), &if_smi);
TNode<HeapObject> value_heap_object = CAST(value);
@@ -181,7 +181,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
@@ -195,7 +195,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
var_value = CallBuiltin(Builtin::kNonNumberToNumeric, context,
diff --git a/chromium/v8/src/init/bootstrapper.cc b/chromium/v8/src/init/bootstrapper.cc
index 326944e13e9..ea654ba1030 100644
--- a/chromium/v8/src/init/bootstrapper.cc
+++ b/chromium/v8/src/init/bootstrapper.cc
@@ -281,7 +281,7 @@ class Genesis {
Handle<Context> native_context);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
- bool ConfigureGlobalObjects(
+ bool ConfigureGlobalObject(
v8::Local<v8::ObjectTemplate> global_proxy_template);
// Migrates all properties from the 'from' object to the 'to'
@@ -357,26 +357,6 @@ void Bootstrapper::LogAllMaps() {
LOG(isolate_, LogAllMaps());
}
-void Bootstrapper::DetachGlobal(Handle<Context> env) {
- isolate_->counters()->errors_thrown_per_context()->AddSample(
- env->native_context().GetErrorsThrown());
-
- ReadOnlyRoots roots(isolate_);
- Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
- global_proxy->set_native_context(roots.null_value());
- // NOTE: Turbofan's JSNativeContextSpecialization depends on DetachGlobal
- // causing a map change.
- JSObject::ForceSetPrototype(isolate_, global_proxy,
- isolate_->factory()->null_value());
- global_proxy->map().SetConstructor(roots.null_value());
- if (FLAG_track_detached_contexts) {
- isolate_->AddDetachedContext(env);
- }
- DCHECK(global_proxy->IsDetached());
-
- env->native_context().set_microtask_queue(isolate_, nullptr);
-}
-
namespace {
#ifdef DEBUG
@@ -551,7 +531,7 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
fun->shared().set_native(true);
if (adapt) {
- fun->shared().set_internal_formal_parameter_count(len);
+ fun->shared().set_internal_formal_parameter_count(JSParameterCount(len));
} else {
fun->shared().DontAdaptArguments();
}
@@ -1338,9 +1318,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
native_context()->set_global_proxy_function(*global_proxy_function);
- // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
- // Return the global proxy.
-
+ // Set the global object as the (hidden) __proto__ of the global proxy after
+  // ConfigureGlobalObject.
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
// Set the native context for the global object.
@@ -1548,9 +1527,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, object_function, "seal",
Builtin::kObjectSeal, 1, false);
- Handle<JSFunction> object_create = SimpleInstallFunction(
- isolate_, object_function, "create", Builtin::kObjectCreate, 2, false);
- native_context()->set_object_create(*object_create);
+ SimpleInstallFunction(isolate_, object_function, "create",
+ Builtin::kObjectCreate, 2, false);
SimpleInstallFunction(isolate_, object_function, "defineProperties",
Builtin::kObjectDefineProperties, 2, true);
@@ -2375,7 +2353,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::PROMISE_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(1);
+ shared->set_internal_formal_parameter_count(JSParameterCount(1));
shared->set_length(1);
InstallSpeciesGetter(isolate_, promise_fun);
@@ -2438,7 +2416,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(2);
+ shared->set_internal_formal_parameter_count(JSParameterCount(2));
shared->set_length(2);
{
@@ -2462,7 +2440,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kRegExpPrototypeFlagsGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->global_string(),
Builtin::kRegExpPrototypeGlobalGetter, true);
- SimpleInstallGetter(isolate(), prototype, factory->has_indices_string(),
+ SimpleInstallGetter(isolate(), prototype, factory->hasIndices_string(),
Builtin::kRegExpPrototypeHasIndicesGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->ignoreCase_string(),
Builtin::kRegExpPrototypeIgnoreCaseGetter, true);
@@ -2746,9 +2724,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, math, "cos", Builtin::kMathCos, 1, true);
SimpleInstallFunction(isolate_, math, "cosh", Builtin::kMathCosh, 1, true);
SimpleInstallFunction(isolate_, math, "exp", Builtin::kMathExp, 1, true);
- Handle<JSFunction> math_floor = SimpleInstallFunction(
- isolate_, math, "floor", Builtin::kMathFloor, 1, true);
- native_context()->set_math_floor(*math_floor);
+ SimpleInstallFunction(isolate_, math, "floor", Builtin::kMathFloor, 1,
+ true);
SimpleInstallFunction(isolate_, math, "fround", Builtin::kMathFround, 1,
true);
SimpleInstallFunction(isolate_, math, "hypot", Builtin::kMathHypot, 2,
@@ -2762,9 +2739,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
true);
SimpleInstallFunction(isolate_, math, "max", Builtin::kMathMax, 2, false);
SimpleInstallFunction(isolate_, math, "min", Builtin::kMathMin, 2, false);
- Handle<JSFunction> math_pow = SimpleInstallFunction(
- isolate_, math, "pow", Builtin::kMathPow, 2, true);
- native_context()->set_math_pow(*math_pow);
+ SimpleInstallFunction(isolate_, math, "pow", Builtin::kMathPow, 2, true);
SimpleInstallFunction(isolate_, math, "random", Builtin::kMathRandom, 0,
true);
SimpleInstallFunction(isolate_, math, "round", Builtin::kMathRound, 1,
@@ -3780,7 +3755,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_->proxy_map()->SetConstructor(*proxy_function);
- proxy_function->shared().set_internal_formal_parameter_count(2);
+ proxy_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(2));
proxy_function->shared().set_length(2);
native_context()->set_proxy_function(*proxy_function);
@@ -4129,10 +4105,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
- Compiler::GetSharedFunctionInfoForScript(
- isolate, source, ScriptDetails(script_name), extension, nullptr,
- ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
+ Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ isolate, source, ScriptDetails(script_name), extension,
+ ScriptCompiler::kNoCompileOptions, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(isolate, name, function_info);
}
@@ -4393,7 +4368,6 @@ void Genesis::InitializeCallSiteBuiltins() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
@@ -4594,6 +4568,20 @@ void Genesis::InitializeGlobal_harmony_intl_locale_info() {
Builtin::kLocalePrototypeWeekInfo, true);
}
+void Genesis::InitializeGlobal_harmony_intl_enumeration() {
+ if (!FLAG_harmony_intl_enumeration) return;
+
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
+ Builtin::kIntlSupportedValuesOf, 1, false);
+}
+
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
@@ -5182,7 +5170,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
return result;
}
-bool Genesis::ConfigureGlobalObjects(
+bool Genesis::ConfigureGlobalObject(
v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(native_context()->global_proxy(), isolate());
Handle<JSObject> global_object(native_context()->global_object(), isolate());
@@ -5250,7 +5238,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
from->map().instance_descriptors(isolate()), isolate());
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
@@ -5267,7 +5255,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
Handle<Name> key(descs->GetKey(i), isolate());
// If the property is already there we skip it.
@@ -5470,16 +5458,20 @@ Genesis::Genesis(
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
- if (context_snapshot_index == 0) {
+ // If no global proxy template was passed in, simply use the global in the
+    // snapshot. If a global proxy template was passed in, it is used to
+    // recreate the global object and its prototype chain, and the data
+    // properties from the deserialized global are copied onto it.
+ if (context_snapshot_index == 0 && !global_proxy_template.IsEmpty()) {
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalObject(global_object);
-
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (!ConfigureGlobalObject(global_proxy_template)) return;
} else {
// The global proxy needs to be integrated into the native context.
HookUpGlobalProxy(global_proxy);
}
+ DCHECK_EQ(global_proxy->native_context(), *native_context());
DCHECK(!global_proxy->IsDetachedFrom(native_context()->global_object()));
} else {
DCHECK(native_context().is_null());
@@ -5506,7 +5498,7 @@ Genesis::Genesis(
if (!InstallABunchOfRandomThings()) return;
if (!InstallExtrasBindings()) return;
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (!ConfigureGlobalObject(global_proxy_template)) return;
isolate->counters()->contexts_created_from_scratch()->Increment();
diff --git a/chromium/v8/src/init/bootstrapper.h b/chromium/v8/src/init/bootstrapper.h
index 19f028048e9..0309c38c572 100644
--- a/chromium/v8/src/init/bootstrapper.h
+++ b/chromium/v8/src/init/bootstrapper.h
@@ -5,6 +5,9 @@
#ifndef V8_INIT_BOOTSTRAPPER_H_
#define V8_INIT_BOOTSTRAPPER_H_
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-snapshot.h"
#include "src/heap/factory.h"
#include "src/objects/fixed-array.h"
#include "src/objects/shared-function-info.h"
@@ -77,9 +80,6 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template);
- // Detach the environment from its outer global object.
- void DetachGlobal(Handle<Context> env);
-
// Traverses the pointers for memory management.
void Iterate(RootVisitor* v);
diff --git a/chromium/v8/src/init/heap-symbols.h b/chromium/v8/src/init/heap-symbols.h
index d4737bf331a..f30192526e2 100644
--- a/chromium/v8/src/init/heap-symbols.h
+++ b/chromium/v8/src/init/heap-symbols.h
@@ -198,13 +198,14 @@
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
V(_, Error_string, "Error") \
- V(_, errors_string, "errors") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, element_string, "element") \
+ V(_, enumerable_string, "enumerable") \
V(_, error_to_string, "[object Error]") \
+ V(_, errors_string, "errors") \
V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
+ V(_, exception_string, "exception") \
V(_, exec_string, "exec") \
V(_, false_string, "false") \
V(_, FinalizationRegistry_string, "FinalizationRegistry") \
@@ -226,7 +227,7 @@
V(_, groups_string, "groups") \
V(_, growable_string, "growable") \
V(_, has_string, "has") \
- V(_, has_indices_string, "hasIndices") \
+ V(_, hasIndices_string, "hasIndices") \
V(_, ignoreCase_string, "ignoreCase") \
V(_, illegal_access_string, "illegal access") \
V(_, illegal_argument_string, "illegal argument") \
diff --git a/chromium/v8/src/init/isolate-allocator.cc b/chromium/v8/src/init/isolate-allocator.cc
index a479f1ab945..c790f5c09a1 100644
--- a/chromium/v8/src/init/isolate-allocator.cc
+++ b/chromium/v8/src/init/isolate-allocator.cc
@@ -8,6 +8,7 @@
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
@@ -74,7 +75,38 @@ void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
- if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
+ base::AddressRegion existing_reservation;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) avoid the name collision with
+ // v8::internal::VirtualMemoryCage and ideally figure out a clear naming
+ // scheme for the different types of virtual memory cages.
+
+ // For now, we allow the virtual memory cage to be disabled even when
+ // compiling with v8_enable_virtual_memory_cage. This fallback will be
+ // disallowed in the future, at the latest once ArrayBuffers are referenced
+ // through an offset rather than a raw pointer.
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ CHECK(kAllowBackingStoresOutsideCage);
+ } else {
+ auto cage = GetProcessWideVirtualMemoryCage();
+ CHECK(cage->is_initialized());
+ // The pointer compression cage must be placed at the start of the virtual
+ // memory cage.
+ // TODO(chromium:12180) this currently assumes that no other pages were
+ // allocated through the cage's page allocator in the meantime. In the
+ // future, the cage initialization will happen just before this function
+ // runs, and so this will be guaranteed. Currently however, it is possible
+ // that the embedder accidentally uses the cage's page allocator prior to
+ // initializing V8, in which case this CHECK will likely fail.
+ CHECK(cage->page_allocator()->AllocatePagesAt(
+ cage->base(), params.reservation_size, PageAllocator::kNoAccess));
+ existing_reservation =
+ base::AddressRegion(cage->base(), params.reservation_size);
+ params.page_allocator = cage->page_allocator();
+ }
+#endif
+ if (!GetProcessWidePtrComprCage()->InitReservation(params,
+ existing_reservation)) {
V8::FatalProcessOutOfMemory(
nullptr,
"Failed to reserve virtual memory for process-wide V8 "
diff --git a/chromium/v8/src/init/startup-data-util.cc b/chromium/v8/src/init/startup-data-util.cc
index d480e3dcc2b..ba3a123651f 100644
--- a/chromium/v8/src/init/startup-data-util.cc
+++ b/chromium/v8/src/init/startup-data-util.cc
@@ -7,6 +7,8 @@
#include <stdlib.h>
#include <string.h>
+#include "include/v8-initialization.h"
+#include "include/v8-snapshot.h"
#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
@@ -76,11 +78,6 @@ void LoadFromFile(const char* snapshot_blob) {
void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
const char* snapshot_name = "snapshot_blob.bin";
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- snapshot_name = "snapshot_blob_trusted.bin";
- }
-#endif
std::unique_ptr<char[]> snapshot =
base::RelativePath(directory_path, snapshot_name);
LoadFromFile(snapshot.get());
diff --git a/chromium/v8/src/init/startup-data-util.h b/chromium/v8/src/init/startup-data-util.h
index 5d49b0b1a17..90751e558e7 100644
--- a/chromium/v8/src/init/startup-data-util.h
+++ b/chromium/v8/src/init/startup-data-util.h
@@ -5,8 +5,6 @@
#ifndef V8_INIT_STARTUP_DATA_UTIL_H_
#define V8_INIT_STARTUP_DATA_UTIL_H_
-#include "include/v8.h"
-
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/init/v8.cc b/chromium/v8/src/init/v8.cc
index 70367d06975..a7b558bbadf 100644
--- a/chromium/v8/src/init/v8.cc
+++ b/chromium/v8/src/init/v8.cc
@@ -20,6 +20,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/init/bootstrapper.h"
+#include "src/init/vm-cage.h"
#include "src/libsampler/sampler.h"
#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
@@ -73,6 +74,17 @@ void V8::TearDown() {
}
void V8::InitializeOncePerProcessImpl() {
+ CHECK(platform_);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ // For now, we still allow the cage to be disabled even if V8 was compiled
+ // with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
+ CHECK(kAllowBackingStoresOutsideCage);
+ GetProcessWideVirtualMemoryCage()->Disable();
+ }
+#endif
+
// Update logging information before enforcing flag implications.
bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
&FLAG_log_all,
@@ -169,6 +181,8 @@ void V8::InitializeOncePerProcessImpl() {
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
+ if (FLAG_print_flag_values) FlagList::PrintValues();
+
#if defined(V8_USE_PERFETTO)
if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
#endif
@@ -207,6 +221,15 @@ void V8::InitializePlatform(v8::Platform* platform) {
#endif
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool V8::InitializeVirtualMemoryCage() {
+ // Platform must have been initialized already.
+ CHECK(platform_);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
+}
+#endif
+
void V8::ShutdownPlatform() {
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
@@ -216,6 +239,13 @@ void V8::ShutdownPlatform() {
#endif
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) alternatively, this could move to its own
+ // public TearDownVirtualMemoryCage function.
+ GetProcessWideVirtualMemoryCage()->TearDown();
+#endif
+
platform_ = nullptr;
}
diff --git a/chromium/v8/src/init/v8.h b/chromium/v8/src/init/v8.h
index a8cd6832cdc..bbde9bfd13a 100644
--- a/chromium/v8/src/init/v8.h
+++ b/chromium/v8/src/init/v8.h
@@ -29,6 +29,10 @@ class V8 : public AllStatic {
const char* location,
bool is_heap_oom = false);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ static bool InitializeVirtualMemoryCage();
+#endif
+
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
diff --git a/chromium/v8/src/init/vm-cage.cc b/chromium/v8/src/init/vm-cage.cc
new file mode 100644
index 00000000000..f62b7d4cd6f
--- /dev/null
+++ b/chromium/v8/src/init/vm-cage.cc
@@ -0,0 +1,97 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/init/vm-cage.h"
+
+#include "include/v8-internal.h"
+#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/lazy-instance.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
+ constexpr bool use_guard_regions = true;
+ return Initialize(page_allocator, kVirtualMemoryCageSize, use_guard_regions);
+}
+
+bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
+ size_t size, bool use_guard_regions) {
+ CHECK(!initialized_);
+ CHECK(!disabled_);
+ CHECK(base::bits::IsPowerOfTwo(size));
+ CHECK_GE(size, kVirtualMemoryCageMinimumSize);
+
+ // Currently, we allow the cage to be smaller than the requested size. This
+ // way, we can gracefully handle cage reservation failures during the initial
+ // rollout and can collect data on how often these occur. In the future, we
+ // will likely either require the cage to always have a fixed size or will
+ // design CagedPointers (pointers that are guaranteed to point into the cage,
+ // e.g. because they are stored as offsets from the cage base) in a way that
+ // doesn't reduce the cage's security properties if it has a smaller size.
+  // Which of these options is ultimately taken likely depends on how frequently
+ // cage reservation failures occur in practice.
+ while (!base_ && size >= kVirtualMemoryCageMinimumSize) {
+ size_t reservation_size = size;
+ if (use_guard_regions) {
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
+ nullptr, reservation_size, kVirtualMemoryCageAlignment,
+ PageAllocator::kNoAccess));
+ if (!base_) {
+ size /= 2;
+ }
+ }
+
+ if (!base_) return false;
+
+ if (use_guard_regions) {
+ base_ += kVirtualMemoryCageGuardRegionSize;
+ has_guard_regions_ = true;
+ }
+
+ page_allocator_ = page_allocator;
+ size_ = size;
+
+ cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
+ base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
+
+ initialized_ = true;
+
+ return true;
+}
+
+void V8VirtualMemoryCage::TearDown() {
+ if (initialized_) {
+ cage_page_allocator_.reset();
+ Address reservation_base = base_;
+ size_t reservation_size = size_;
+ if (has_guard_regions_) {
+ reservation_base -= kVirtualMemoryCageGuardRegionSize;
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
+ reservation_size));
+ page_allocator_ = nullptr;
+ base_ = kNullAddress;
+ size_ = 0;
+ initialized_ = false;
+ has_guard_regions_ = false;
+ }
+ disabled_ = false;
+}
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
+ GetProcessWideVirtualMemoryCage)
+
+#endif
+
+} // namespace internal
+} // namespace v8
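
V8VirtualMemoryCage::Initialize above retries with half the size whenever the reservation fails, down to a minimum cage size. A self-contained sketch of that retry strategy follows; TryReserve stands in for the page allocator, and the capacities and sizes are illustrative assumptions.

#include <cstddef>
#include <cstdint>

// Stand-in for page_allocator->AllocatePages(); succeeds only up to an
// arbitrary fake capacity so the halving loop has something to do.
static uintptr_t TryReserve(size_t size) {
  constexpr size_t kFakeCapacity = size_t{64} << 30;  // pretend 64 GB available
  return size <= kFakeCapacity ? static_cast<uintptr_t>(0x100000000ull) : 0;
}

// Mirrors the loop in Initialize: keep halving the requested size until a
// reservation succeeds or the minimum size is reached.
uintptr_t ReserveCage(size_t size, size_t minimum_size) {
  uintptr_t base = 0;
  while (base == 0 && size >= minimum_size) {
    base = TryReserve(size);
    if (base == 0) size /= 2;
  }
  return base;  // 0 means even the minimum size could not be reserved
}

int main() {
  const size_t kRequested = size_t{1} << 40;  // assumed 1 TB request
  const size_t kMinimum = size_t{8} << 30;    // assumed 8 GB minimum
  return ReserveCage(kRequested, kMinimum) != 0 ? 0 : 1;
}
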
diff --git a/chromium/v8/src/init/vm-cage.h b/chromium/v8/src/init/vm-cage.h
new file mode 100644
index 00000000000..d7e0728ca1d
--- /dev/null
+++ b/chromium/v8/src/init/vm-cage.h
@@ -0,0 +1,129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INIT_VM_CAGE_H_
+#define V8_INIT_VM_CAGE_H_
+
+#include "include/v8-internal.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+class PageAllocator;
+
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+/**
+ * V8 Virtual Memory Cage.
+ *
+ * When the virtual memory cage is enabled, V8 will reserve a large region of
+ * virtual address space - the cage - and place most of its objects inside of
+ * it. This allows these objects to reference each other through offsets rather
+ * than raw pointers, which in turn makes it harder for an attacker to abuse
+ * them in an exploit.
+ *
+ * The pointer compression region, which contains most V8 objects, and inside
+ * of which compressed (32-bit) pointers are used, is located at the start of
+ * the virtual memory cage. The remainder of the cage is mostly used for memory
+ * buffers, in particular ArrayBuffer backing stores and WASM memory cages.
+ *
+ * It should be assumed that an attacker is able to corrupt data arbitrarily
+ * and concurrently inside the virtual memory cage. The heap sandbox, of which
+ * the virtual memory cage is one building block, attempts to then stop an
+ * attacker from corrupting data outside of the cage.
+ *
+ * As the embedder is responsible for providing ArrayBuffer allocators, v8
+ * exposes a page allocator for the virtual memory cage to the embedder.
+ *
+ * TODO(chromium:1218005) come up with a coherent naming scheme for this class
+ * and the other "cages" in v8.
+ */
+class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
+ public:
+ // +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
+ // | 32 GB | (Ideally) 1 TB | 32 GB |
+ // | | | |
+ // | Guard | 4 GB : ArrayBuffer backing stores, | Guard |
+ // | Region | V8 Heap : WASM memory buffers, and | Region |
+ // | (front) | Region : any other caged objects. | (back) |
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // ^ ^
+ // base base + size
+
+ V8VirtualMemoryCage() = default;
+
+ V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
+ V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
+
+ bool Initialize(v8::PageAllocator* page_allocator);
+ void Disable() {
+ CHECK(!initialized_);
+ disabled_ = true;
+ }
+
+ void TearDown();
+
+ bool is_initialized() const { return initialized_; }
+ bool is_disabled() const { return disabled_; }
+ bool is_enabled() const { return !disabled_; }
+
+ Address base() const { return base_; }
+ size_t size() const { return size_; }
+
+ base::BoundedPageAllocator* page_allocator() const {
+ return cage_page_allocator_.get();
+ }
+
+ bool Contains(Address addr) const {
+ return addr >= base_ && addr < base_ + size_;
+ }
+
+ bool Contains(void* ptr) const {
+ return Contains(reinterpret_cast<Address>(ptr));
+ }
+
+ private:
+ // The SequentialUnmapperTest calls the private Initialize method to create a
+ // cage without guard regions, which would otherwise consume too much memory.
+ friend class SequentialUnmapperTest;
+
+  // We allow tests to disable the guard regions around the cage. This is useful,
+  // for example, for tests like the SequentialUnmapperTest, which track page
+  // allocations and so would incur a large overhead from the guard regions.
+ bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
+ bool use_guard_regions);
+
+ Address base_ = kNullAddress;
+ size_t size_ = 0;
+ bool has_guard_regions_ = false;
+ bool initialized_ = false;
+ bool disabled_ = false;
+ // The PageAllocator through which the virtual memory of the cage was
+ // allocated.
+ v8::PageAllocator* page_allocator_ = nullptr;
+ // The BoundedPageAllocator to allocate pages inside the cage.
+ std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
+};
+
+V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
+
+V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ Address addr = reinterpret_cast<Address>(ptr);
+ return kAllowBackingStoresOutsideCage || addr == kNullAddress ||
+ GetProcessWideVirtualMemoryCage()->Contains(addr);
+#else
+ return true;
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INIT_VM_CAGE_H_
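
Contains() and IsValidBackingStorePointer() above reduce to a half-open range check against the cage reservation. The self-contained sketch below shows that check; the base address and size are illustrative, not V8's real layout constants.

#include <cassert>
#include <cstdint>

struct CageSketch {
  uintptr_t base;
  size_t size;
  bool Contains(uintptr_t addr) const {
    return addr >= base && addr < base + size;
  }
};

int main() {
  CageSketch cage{0x4000000000, size_t{1} << 40};  // assumed base and 1 TB size
  assert(cage.Contains(cage.base));                // first byte is inside
  assert(!cage.Contains(cage.base + cage.size));   // one-past-the-end is not
  assert(!cage.Contains(0));  // null is outside Contains(); the helper above
                              // special-cases kNullAddress separately.
  return 0;
}
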
diff --git a/chromium/v8/src/inspector/DEPS b/chromium/v8/src/inspector/DEPS
index 1c3ef433143..08b97ea3e96 100644
--- a/chromium/v8/src/inspector/DEPS
+++ b/chromium/v8/src/inspector/DEPS
@@ -13,7 +13,6 @@ include_rules = [
"+src/base/safe_conversions.h",
"+src/base/template-utils.h",
"+src/base/v8-fallthrough.h",
- "+src/logging/tracing-flags.h",
"+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
diff --git a/chromium/v8/src/inspector/custom-preview.cc b/chromium/v8/src/inspector/custom-preview.cc
index d8e88861cb2..97b0a07210e 100644
--- a/chromium/v8/src/inspector/custom-preview.cc
+++ b/chromium/v8/src/inspector/custom-preview.cc
@@ -5,6 +5,11 @@
#include "src/inspector/custom-preview.h"
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
diff --git a/chromium/v8/src/inspector/injected-script.cc b/chromium/v8/src/inspector/injected-script.cc
index fc029e937aa..9cd481e96bf 100644
--- a/chromium/v8/src/inspector/injected-script.cc
+++ b/chromium/v8/src/inspector/injected-script.cc
@@ -34,7 +34,11 @@
#include <unordered_set>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
@@ -354,8 +358,8 @@ class PropertyAccumulator : public ValueMirror::PropertyAccumulator {
Response InjectedScript::getProperties(
v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
- std::unique_ptr<Array<PropertyDescriptor>>* properties,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode, std::unique_ptr<Array<PropertyDescriptor>>* properties,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::HandleScope handles(m_context->isolate());
v8::Local<v8::Context> context = m_context->context();
@@ -367,7 +371,8 @@ Response InjectedScript::getProperties(
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
- accessorPropertiesOnly, &accumulator)) {
+ accessorPropertiesOnly,
+ nonIndexedPropertiesOnly, &accumulator)) {
return createExceptionDetails(tryCatch, groupName, exceptionDetails);
}
for (const PropertyMirror& mirror : mirrors) {
@@ -604,9 +609,9 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
}
}
if (!selectedColumns.empty()) {
- for (const std::unique_ptr<PropertyPreview>& column :
+ for (const std::unique_ptr<PropertyPreview>& prop :
*preview->getProperties()) {
- ObjectPreview* columnPreview = column->getValuePreview(nullptr);
+ ObjectPreview* columnPreview = prop->getValuePreview(nullptr);
if (!columnPreview) continue;
// Use raw pointer here since the lifetime of each PropertyPreview is
// ensured by columnPreview. This saves an additional clone.
diff --git a/chromium/v8/src/inspector/injected-script.h b/chromium/v8/src/inspector/injected-script.h
index 9971d7da3a7..86bcf60b171 100644
--- a/chromium/v8/src/inspector/injected-script.h
+++ b/chromium/v8/src/inspector/injected-script.h
@@ -35,6 +35,9 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Forward.h"
@@ -42,8 +45,6 @@
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-debugger.h"
-#include "include/v8.h"
-
namespace v8_inspector {
class RemoteObjectId;
@@ -76,7 +77,8 @@ class InjectedScript final {
Response getProperties(
v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Runtime::ExceptionDetails>*);
diff --git a/chromium/v8/src/inspector/inspected-context.cc b/chromium/v8/src/inspector/inspected-context.cc
index a47df1ef123..59d186e43fb 100644
--- a/chromium/v8/src/inspector/inspected-context.cc
+++ b/chromium/v8/src/inspector/inspected-context.cc
@@ -4,14 +4,14 @@
#include "src/inspector/inspected-context.h"
+#include "include/v8-context.h"
+#include "include/v8-inspector.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
class InspectedContext::WeakCallbackData {
@@ -126,12 +126,15 @@ void InspectedContext::discardInjectedScript(int sessionId) {
bool InspectedContext::addInternalObject(v8::Local<v8::Object> object,
V8InternalValueType type) {
if (m_internalObjects.IsEmpty()) {
- m_internalObjects.Reset(isolate(), v8::debug::WeakMap::New(isolate()));
+ m_internalObjects.Reset(isolate(),
+ v8::debug::EphemeronTable::New(isolate()));
}
- return !m_internalObjects.Get(isolate())
- ->Set(m_context.Get(isolate()), object,
- v8::Integer::New(isolate(), static_cast<int>(type)))
- .IsEmpty();
+ v8::Local<v8::debug::EphemeronTable> new_map =
+ m_internalObjects.Get(isolate())->Set(
+ isolate(), object,
+ v8::Integer::New(isolate(), static_cast<int>(type)));
+ m_internalObjects.Reset(isolate(), new_map);
+ return true;
}
V8InternalValueType InspectedContext::getInternalType(
@@ -139,7 +142,7 @@ V8InternalValueType InspectedContext::getInternalType(
if (m_internalObjects.IsEmpty()) return V8InternalValueType::kNone;
v8::Local<v8::Value> typeValue;
if (!m_internalObjects.Get(isolate())
- ->Get(m_context.Get(isolate()), object)
+ ->Get(isolate(), object)
.ToLocal(&typeValue) ||
!typeValue->IsUint32()) {
return V8InternalValueType::kNone;
diff --git a/chromium/v8/src/inspector/inspected-context.h b/chromium/v8/src/inspector/inspected-context.h
index d3f0fe012b8..50e5a87bb3a 100644
--- a/chromium/v8/src/inspector/inspected-context.h
+++ b/chromium/v8/src/inspector/inspected-context.h
@@ -9,12 +9,18 @@
#include <unordered_map>
#include <unordered_set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/v8-debugger-id.h"
+namespace v8 {
+class Context;
+class Object;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
@@ -71,7 +77,7 @@ class InspectedContext {
std::unordered_set<int> m_reportedSessionIds;
std::unordered_map<int, std::unique_ptr<InjectedScript>> m_injectedScripts;
WeakCallbackData* m_weakCallbackData;
- v8::Global<v8::debug::WeakMap> m_internalObjects;
+ v8::Global<v8::debug::EphemeronTable> m_internalObjects;
};
} // namespace v8_inspector
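
The WeakMap-to-EphemeronTable migration above changes the update model: EphemeronTable::Set returns a new table, so callers must re-point their persistent handle at the result rather than relying on in-place mutation as with the old debug::WeakMap. The following sketch, which uses standard C++ containers rather than the V8 API, illustrates that copy-on-write pattern.

#include <cassert>
#include <map>
#include <memory>

using Table = std::map<int, int>;

// Stand-in for EphemeronTable::Set(): returns a new table with the entry added.
std::shared_ptr<const Table> Set(std::shared_ptr<const Table> t, int key,
                                 int value) {
  auto copy = std::make_shared<Table>(t ? *t : Table{});
  (*copy)[key] = value;
  return copy;
}

int main() {
  // Plays the role of the v8::Global<v8::debug::EphemeronTable> member.
  std::shared_ptr<const Table> persistent = std::make_shared<Table>();
  // Write the returned table back, as the patched code does via Reset().
  persistent = Set(persistent, 1, 42);
  assert(persistent->at(1) == 42);
  return 0;
}
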
diff --git a/chromium/v8/src/inspector/test-interface.h b/chromium/v8/src/inspector/test-interface.h
index cf16c6936e1..406ba02fa99 100644
--- a/chromium/v8/src/inspector/test-interface.h
+++ b/chromium/v8/src/inspector/test-interface.h
@@ -5,7 +5,7 @@
#ifndef V8_INSPECTOR_TEST_INTERFACE_H_
#define V8_INSPECTOR_TEST_INTERFACE_H_
-#include "include/v8.h"
+#include "include/v8config.h"
namespace v8_inspector {
diff --git a/chromium/v8/src/inspector/v8-console-message.cc b/chromium/v8/src/inspector/v8-console-message.cc
index 78622aa8d39..2734c67876b 100644
--- a/chromium/v8/src/inspector/v8-console-message.cc
+++ b/chromium/v8/src/inspector/v8-console-message.cc
@@ -4,7 +4,11 @@
#include "src/inspector/v8-console-message.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
diff --git a/chromium/v8/src/inspector/v8-console-message.h b/chromium/v8/src/inspector/v8-console-message.h
index 4dc521ee1c0..cd960cf7978 100644
--- a/chromium/v8/src/inspector/v8-console-message.h
+++ b/chromium/v8/src/inspector/v8-console-message.h
@@ -10,7 +10,8 @@
#include <memory>
#include <set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/inspector/protocol/Console.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
diff --git a/chromium/v8/src/inspector/v8-console.cc b/chromium/v8/src/inspector/v8-console.cc
index 93a73f25803..55b620b0fca 100644
--- a/chromium/v8/src/inspector/v8-console.cc
+++ b/chromium/v8/src/inspector/v8-console.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-console.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -17,8 +22,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace {
diff --git a/chromium/v8/src/inspector/v8-console.h b/chromium/v8/src/inspector/v8-console.h
index 59d7a8152f6..cd10f11a8ad 100644
--- a/chromium/v8/src/inspector/v8-console.h
+++ b/chromium/v8/src/inspector/v8-console.h
@@ -5,11 +5,16 @@
#ifndef V8_INSPECTOR_V8_CONSOLE_H_
#define V8_INSPECTOR_V8_CONSOLE_H_
+#include "include/v8-array-buffer.h"
+#include "include/v8-external.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
-
-#include "include/v8.h"
#include "src/debug/interface-types.h"
+namespace v8 {
+class Set;
+} // namespace v8
+
namespace v8_inspector {
class InspectedContext;
diff --git a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
index c49903f8c3e..c19e2b72afe 100644
--- a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -7,7 +7,10 @@
#include <algorithm>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
diff --git a/chromium/v8/src/inspector/v8-debugger-script.h b/chromium/v8/src/inspector/v8-debugger-script.h
index a8fd6775b05..d4486eb85e6 100644
--- a/chromium/v8/src/inspector/v8-debugger-script.h
+++ b/chromium/v8/src/inspector/v8-debugger-script.h
@@ -32,12 +32,16 @@
#include <memory>
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/string-util.h"
-#include "include/v8.h"
-#include "src/debug/debug-interface.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/chromium/v8/src/inspector/v8-debugger.cc b/chromium/v8/src/inspector/v8-debugger.cc
index 0ac934a4d38..da75adcd592 100644
--- a/chromium/v8/src/inspector/v8-debugger.cc
+++ b/chromium/v8/src/inspector/v8-debugger.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-debugger.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-util.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
@@ -14,13 +19,11 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-util.h"
-
namespace v8_inspector {
namespace {
-static const int kMaxAsyncTaskStacks = 128 * 1024;
+static const int kMaxAsyncTaskStacks = 8 * 1024;
static const int kNoBreakpointId = 0;
template <typename Map>
@@ -535,10 +538,6 @@ size_t HeapLimitForDebugging(size_t initial_heap_limit) {
size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
-// TODO(solanes, v8:10876): Remove when bug is solved.
-#if DEBUG
- printf("nearHeapLimitCallback\n");
-#endif
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context =
@@ -658,7 +657,7 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
break;
case v8::debug::kAsyncFunctionSuspended: {
if (m_asyncTaskStacks.find(task) == m_asyncTaskStacks.end()) {
- asyncTaskScheduledForStack("async function", task, true);
+ asyncTaskScheduledForStack("await", task, true, true);
}
auto stackIt = m_asyncTaskStacks.find(task);
if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
@@ -771,12 +770,13 @@ v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
}
v8::MaybeLocal<v8::Array> V8Debugger::collectionsEntries(
- v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+ v8::Local<v8::Context> context, v8::Local<v8::Value> collection) {
v8::Isolate* isolate = context->GetIsolate();
v8::Local<v8::Array> entries;
bool isKeyValue = false;
- if (!value->IsObject() ||
- !value.As<v8::Object>()->PreviewEntries(&isKeyValue).ToLocal(&entries)) {
+ if (!collection->IsObject() || !collection.As<v8::Object>()
+ ->PreviewEntries(&isKeyValue)
+ .ToLocal(&entries)) {
return v8::MaybeLocal<v8::Array>();
}
@@ -977,11 +977,13 @@ void V8Debugger::asyncTaskFinished(void* task) {
}
void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
- void* task, bool recurring) {
+ void* task, bool recurring,
+ bool skipTopFrame) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
std::shared_ptr<AsyncStackTrace> asyncStack = AsyncStackTrace::capture(
- this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture);
+ this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture,
+ skipTopFrame);
if (asyncStack) {
m_asyncTaskStacks[task] = asyncStack;
if (recurring) m_recurringTasks.insert(task);
diff --git a/chromium/v8/src/inspector/v8-debugger.h b/chromium/v8/src/inspector/v8-debugger.h
index fc790a93279..c39e39d6a2f 100644
--- a/chromium/v8/src/inspector/v8-debugger.h
+++ b/chromium/v8/src/inspector/v8-debugger.h
@@ -161,7 +161,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Value> value);
void asyncTaskScheduledForStack(const String16& taskName, void* task,
- bool recurring);
+ bool recurring, bool skipTopFrame = false);
void asyncTaskCanceledForStack(void* task);
void asyncTaskStartedForStack(void* task);
void asyncTaskFinishedForStack(void* task);
diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index ed6901292c8..955d7bcf76d 100644
--- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
#include "include/v8-profiler.h"
diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
index feda75ffb71..cd92bd32d04 100644
--- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -11,7 +11,9 @@
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/HeapProfiler.h"
-#include "include/v8.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/chromium/v8/src/inspector/v8-inspector-impl.cc b/chromium/v8/src/inspector/v8-inspector-impl.cc
index f0cfa9b2c75..3f48449a99f 100644
--- a/chromium/v8/src/inspector/v8-inspector-impl.cc
+++ b/chromium/v8/src/inspector/v8-inspector-impl.cc
@@ -32,6 +32,9 @@
#include <vector>
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-microtask-queue.h"
#include "include/v8-platform.h"
#include "src/base/platform/mutex.h"
#include "src/debug/debug-interface.h"
@@ -333,39 +336,6 @@ void V8InspectorImpl::allAsyncTasksCanceled() {
m_debugger->allAsyncTasksCanceled();
}
-V8Inspector::Counters::Counters(v8::Isolate* isolate) : m_isolate(isolate) {
- CHECK(m_isolate);
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- CHECK(!inspector->m_counters);
- inspector->m_counters = this;
- m_isolate->SetCounterFunction(&Counters::getCounterPtr);
-}
-
-V8Inspector::Counters::~Counters() {
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- inspector->m_counters = nullptr;
- m_isolate->SetCounterFunction(nullptr);
-}
-
-int* V8Inspector::Counters::getCounterPtr(const char* name) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- DCHECK(isolate);
- V8Inspector* inspector = v8::debug::GetInspector(isolate);
- DCHECK(inspector);
- auto* instance = static_cast<V8InspectorImpl*>(inspector)->m_counters;
- DCHECK(instance);
- return &(instance->m_countersMap[name]);
-}
-
-std::shared_ptr<V8Inspector::Counters> V8InspectorImpl::enableCounters() {
- if (m_counters) return m_counters->shared_from_this();
- return std::make_shared<Counters>(m_isolate);
-}
-
v8::MaybeLocal<v8::Context> V8InspectorImpl::regexContext() {
if (m_regexContext.IsEmpty()) {
m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
@@ -516,19 +486,17 @@ bool V8InspectorImpl::associateExceptionData(v8::Local<v8::Context>,
v8::Context::Scope contextScope(context);
v8::HandleScope handles(m_isolate);
if (m_exceptionMetaData.IsEmpty())
- m_exceptionMetaData.Reset(m_isolate, v8::debug::WeakMap::New(m_isolate));
+ m_exceptionMetaData.Reset(m_isolate,
+ v8::debug::EphemeronTable::New(m_isolate));
- v8::Local<v8::debug::WeakMap> map = m_exceptionMetaData.Get(m_isolate);
- v8::MaybeLocal<v8::Value> entry = map->Get(context, exception);
+ v8::Local<v8::debug::EphemeronTable> map = m_exceptionMetaData.Get(m_isolate);
+ v8::MaybeLocal<v8::Value> entry = map->Get(m_isolate, exception);
v8::Local<v8::Object> object;
if (entry.IsEmpty() || !entry.ToLocalChecked()->IsObject()) {
object =
v8::Object::New(m_isolate, v8::Null(m_isolate), nullptr, nullptr, 0);
- v8::MaybeLocal<v8::debug::WeakMap> new_map =
- map->Set(context, exception, object);
- if (!new_map.IsEmpty()) {
- m_exceptionMetaData.Reset(m_isolate, new_map.ToLocalChecked());
- }
+ m_exceptionMetaData.Reset(m_isolate,
+ map->Set(m_isolate, exception, object));
} else {
object = entry.ToLocalChecked().As<v8::Object>();
}
@@ -548,8 +516,8 @@ v8::MaybeLocal<v8::Object> V8InspectorImpl::getAssociatedExceptionData(
!exceptionMetaDataContext().ToLocal(&context)) {
return v8::MaybeLocal<v8::Object>();
}
- v8::Local<v8::debug::WeakMap> map = m_exceptionMetaData.Get(m_isolate);
- auto entry = map->Get(context, exception);
+ v8::Local<v8::debug::EphemeronTable> map = m_exceptionMetaData.Get(m_isolate);
+ auto entry = map->Get(m_isolate, exception);
v8::Local<v8::Value> object;
if (!entry.ToLocal(&object) || !object->IsObject())
return v8::MaybeLocal<v8::Object>();
diff --git a/chromium/v8/src/inspector/v8-inspector-impl.h b/chromium/v8/src/inspector/v8-inspector-impl.h
index e1607f88c0e..d628c57a206 100644
--- a/chromium/v8/src/inspector/v8-inspector-impl.h
+++ b/chromium/v8/src/inspector/v8-inspector-impl.h
@@ -56,7 +56,7 @@ class V8StackTraceImpl;
class V8InspectorImpl : public V8Inspector {
public:
- V8InspectorImpl(v8::Isolate*, V8InspectorClient*);
+ V8_EXPORT_PRIVATE V8InspectorImpl(v8::Isolate*, V8InspectorClient*);
~V8InspectorImpl() override;
V8InspectorImpl(const V8InspectorImpl&) = delete;
V8InspectorImpl& operator=(const V8InspectorImpl&) = delete;
@@ -110,12 +110,9 @@ class V8InspectorImpl : public V8Inspector {
void externalAsyncTaskStarted(const V8StackTraceId& parent) override;
void externalAsyncTaskFinished(const V8StackTraceId& parent) override;
- std::shared_ptr<Counters> enableCounters() override;
-
- bool associateExceptionData(v8::Local<v8::Context>,
- v8::Local<v8::Value> exception,
- v8::Local<v8::Name> key,
- v8::Local<v8::Value> value) override;
+ V8_EXPORT_PRIVATE bool associateExceptionData(
+ v8::Local<v8::Context>, v8::Local<v8::Value> exception,
+ v8::Local<v8::Name> key, v8::Local<v8::Value> value) override;
unsigned nextExceptionId() { return ++m_lastExceptionId; }
void enableStackCapturingIfNeeded();
@@ -136,7 +133,7 @@ class V8InspectorImpl : public V8Inspector {
int contextGroupId,
const std::function<void(V8InspectorSessionImpl*)>& callback);
int64_t generateUniqueId();
- v8::MaybeLocal<v8::Object> getAssociatedExceptionData(
+ V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Object> getAssociatedExceptionData(
v8::Local<v8::Value> exception);
class EvaluateScope {
@@ -157,14 +154,12 @@ class V8InspectorImpl : public V8Inspector {
};
private:
- friend class Counters;
-
v8::Isolate* m_isolate;
V8InspectorClient* m_client;
std::unique_ptr<V8Debugger> m_debugger;
v8::Global<v8::Context> m_regexContext;
v8::Global<v8::Context> m_exceptionMetaDataContext;
- v8::Global<v8::debug::WeakMap> m_exceptionMetaData;
+ v8::Global<v8::debug::EphemeronTable> m_exceptionMetaData;
int m_capturingStackTracesCount;
unsigned m_lastExceptionId;
int m_lastContextId;
@@ -191,8 +186,6 @@ class V8InspectorImpl : public V8Inspector {
std::map<std::pair<int64_t, int64_t>, int> m_uniqueIdToContextId;
std::unique_ptr<V8Console> m_console;
-
- Counters* m_counters = nullptr;
};
} // namespace v8_inspector
diff --git a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
index b2c04842cc8..6b444590825 100644
--- a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -16,7 +16,6 @@
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
-#include "src/logging/tracing-flags.h"
namespace v8_inspector {
@@ -30,8 +29,6 @@ static const char preciseCoverageDetailed[] = "preciseCoverageDetailed";
static const char preciseCoverageAllowTriggeredUpdates[] =
"preciseCoverageAllowTriggeredUpdates";
static const char typeProfileStarted[] = "typeProfileStarted";
-static const char countersEnabled[] = "countersEnabled";
-static const char runtimeCallStatsEnabled[] = "runtimeCallStatsEnabled";
} // namespace ProfilerAgentState
namespace {
@@ -243,16 +240,6 @@ Response V8ProfilerAgentImpl::disable() {
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
}
- if (m_counters) {
- disableCounters();
- m_state->setBoolean(ProfilerAgentState::countersEnabled, false);
- }
-
- if (m_runtime_call_stats_enabled) {
- disableRuntimeCallStats();
- m_state->setBoolean(ProfilerAgentState::runtimeCallStatsEnabled, false);
- }
-
return Response::Success();
}
@@ -287,15 +274,6 @@ void V8ProfilerAgentImpl::restore() {
Maybe<bool>(updatesAllowed), &timestamp);
}
}
-
- if (m_state->booleanProperty(ProfilerAgentState::countersEnabled, false)) {
- enableCounters();
- }
-
- if (m_state->booleanProperty(ProfilerAgentState::runtimeCallStatsEnabled,
- false)) {
- enableRuntimeCallStats();
- }
}
Response V8ProfilerAgentImpl::start() {
@@ -551,104 +529,6 @@ Response V8ProfilerAgentImpl::takeTypeProfile(
return Response::Success();
}
-Response V8ProfilerAgentImpl::enableCounters() {
- if (m_counters)
- return Response::ServerError("Counters collection already enabled.");
-
- if (V8Inspector* inspector = v8::debug::GetInspector(m_isolate))
- m_counters = inspector->enableCounters();
- else
- return Response::ServerError("No inspector found.");
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableCounters() {
- if (m_counters) m_counters.reset();
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) {
- if (!m_counters)
- return Response::ServerError("Counters collection is not enabled.");
-
- *out_result =
- std::make_unique<protocol::Array<protocol::Profiler::CounterInfo>>();
-
- for (const auto& counter : m_counters->getCountersMap()) {
- (*out_result)
- ->emplace_back(
- protocol::Profiler::CounterInfo::create()
- .setName(String16(counter.first.data(), counter.first.length()))
- .setValue(counter.second)
- .build());
- }
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::enableRuntimeCallStats() {
- if (v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is already enabled.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(true);
- m_runtime_call_stats_enabled = true;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableRuntimeCallStats() {
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection was not enabled by this session.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(false);
- m_runtime_call_stats_enabled = false;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) {
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection was disabled outside of this session.");
- }
-
- *out_result = std::make_unique<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>();
-
- v8::debug::EnumerateRuntimeCallCounters(
- m_isolate,
- [&](const char* name, int64_t count, v8::base::TimeDelta time) {
- (*out_result)
- ->emplace_back(protocol::Profiler::RuntimeCallCounterInfo::create()
- .setName(String16(name))
- .setValue(static_cast<double>(count))
- .setTime(time.InSecondsF())
- .build());
- });
-
- return Response::Success();
-}
-
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
diff --git a/chromium/v8/src/inspector/v8-profiler-agent-impl.h b/chromium/v8/src/inspector/v8-profiler-agent-impl.h
index 7cafa0cb01e..4fba6e6c704 100644
--- a/chromium/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-profiler-agent-impl.h
@@ -59,19 +59,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>*
out_result) override;
- Response enableCounters() override;
- Response disableCounters() override;
- Response getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) override;
-
- Response enableRuntimeCallStats() override;
- Response disableRuntimeCallStats() override;
- Response getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) override;
-
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
@@ -95,8 +82,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::vector<ProfileDescriptor> m_startedProfiles;
String16 m_frontendInitiatedProfileId;
int m_startedProfilesCount = 0;
- std::shared_ptr<V8Inspector::Counters> m_counters;
- bool m_runtime_call_stats_enabled = false;
};
} // namespace v8_inspector
diff --git a/chromium/v8/src/inspector/v8-regex.cc b/chromium/v8/src/inspector/v8-regex.cc
index 55b00d50aef..fd44a6a2584 100644
--- a/chromium/v8/src/inspector/v8-regex.cc
+++ b/chromium/v8/src/inspector/v8-regex.cc
@@ -6,11 +6,15 @@
#include <limits.h>
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-regexp.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
diff --git a/chromium/v8/src/inspector/v8-regex.h b/chromium/v8/src/inspector/v8-regex.h
index 9ce31cf4cec..75d972f15ae 100644
--- a/chromium/v8/src/inspector/v8-regex.h
+++ b/chromium/v8/src/inspector/v8-regex.h
@@ -5,10 +5,13 @@
#ifndef V8_INSPECTOR_V8_REGEX_H_
#define V8_INSPECTOR_V8_REGEX_H_
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
-#include "include/v8.h"
+namespace v8 {
+class RegExp;
+}
namespace v8_inspector {
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
index b78b641edf3..3a8277639cb 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -33,6 +33,11 @@
#include <inttypes.h>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -47,8 +52,6 @@
#include "src/inspector/v8-value-utils.h"
#include "src/tracing/trace-event.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace V8RuntimeAgentImplState {
@@ -418,6 +421,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
Response V8RuntimeAgentImpl::getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
@@ -442,6 +446,7 @@ Response V8RuntimeAgentImpl::getProperties(
response = scope.injectedScript()->getProperties(
object, scope.objectGroupName(), ownProperties.fromMaybe(false),
accessorPropertiesOnly.fromMaybe(false),
+ nonIndexedPropertiesOnly.fromMaybe(false),
generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
: WrapMode::kNoPreview,
result, exceptionDetails);
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.h b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
index eadc596ca39..0ab39e8da2b 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
@@ -35,11 +35,16 @@
#include <set>
#include <unordered_map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
+// #include "include/v8-function-callback.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
+namespace v8 {
+class Script;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
@@ -88,6 +93,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Response getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
diff --git a/chromium/v8/src/inspector/v8-stack-trace-impl.cc b/chromium/v8/src/inspector/v8-stack-trace-impl.cc
index 6400506610a..b1b584c3637 100644
--- a/chromium/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/chromium/v8/src/inspector/v8-stack-trace-impl.cc
@@ -389,7 +389,6 @@ void V8StackTraceImpl::StackFrameIterator::next() {
while (m_currentIt == m_currentEnd && m_parent) {
const std::vector<std::shared_ptr<StackFrame>>& frames = m_parent->frames();
m_currentIt = frames.begin();
- if (m_parent->description() == "async function") ++m_currentIt;
m_currentEnd = frames.end();
m_parent = m_parent->parent().lock().get();
}
@@ -405,7 +404,8 @@ StackFrame* V8StackTraceImpl::StackFrameIterator::frame() {
// static
std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
- V8Debugger* debugger, const String16& description, int maxStackSize) {
+ V8Debugger* debugger, const String16& description, int maxStackSize,
+ bool skipTopFrame) {
DCHECK(debugger);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
@@ -419,6 +419,9 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
v8::Local<v8::StackTrace> v8StackTrace = v8::StackTrace::CurrentStackTrace(
isolate, maxStackSize, stackTraceOptions);
frames = toFramesVector(debugger, v8StackTrace, maxStackSize);
+ if (skipTopFrame && !frames.empty()) {
+ frames.erase(frames.begin());
+ }
}
std::shared_ptr<AsyncStackTrace> asyncParent;
diff --git a/chromium/v8/src/inspector/v8-stack-trace-impl.h b/chromium/v8/src/inspector/v8-stack-trace-impl.h
index cd86659fdb7..8cefffee123 100644
--- a/chromium/v8/src/inspector/v8-stack-trace-impl.h
+++ b/chromium/v8/src/inspector/v8-stack-trace-impl.h
@@ -9,11 +9,16 @@
#include <vector>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/string-16.h"
+namespace v8 {
+class StackFrame;
+class StackTrace;
+} // namespace v8
+
namespace v8_inspector {
class AsyncStackTrace;
@@ -113,7 +118,8 @@ class AsyncStackTrace {
AsyncStackTrace& operator=(const AsyncStackTrace&) = delete;
static std::shared_ptr<AsyncStackTrace> capture(V8Debugger*,
const String16& description,
- int maxStackSize);
+ int maxStackSize,
+ bool skipTopFrame = false);
static uintptr_t store(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace> stack);
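
The hard-coded skipping of the top frame for "async function" parents is removed from the stack frame iterator; instead AsyncStackTrace::capture() takes an explicit skipTopFrame flag (default false) and drops that frame at capture time. A call site that wants the old behaviour now opts in explicitly; debugger, description and maxStackSize stand for whatever the caller already has in scope:

    std::shared_ptr<AsyncStackTrace> stack = AsyncStackTrace::capture(
        debugger, description, maxStackSize, /* skipTopFrame */ true);
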
diff --git a/chromium/v8/src/inspector/v8-value-utils.cc b/chromium/v8/src/inspector/v8-value-utils.cc
index dd73c2919dc..4b9f0b7a1ab 100644
--- a/chromium/v8/src/inspector/v8-value-utils.cc
+++ b/chromium/v8/src/inspector/v8-value-utils.cc
@@ -4,6 +4,10 @@
#include "src/inspector/v8-value-utils.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
diff --git a/chromium/v8/src/inspector/v8-value-utils.h b/chromium/v8/src/inspector/v8-value-utils.h
index 6817d9fbb66..7eae23d9b14 100644
--- a/chromium/v8/src/inspector/v8-value-utils.h
+++ b/chromium/v8/src/inspector/v8-value-utils.h
@@ -5,10 +5,9 @@
#ifndef V8_INSPECTOR_V8_VALUE_UTILS_H_
#define V8_INSPECTOR_V8_VALUE_UTILS_H_
+#include "include/v8-local-handle.h"
#include "src/inspector/protocol/Protocol.h"
-#include "include/v8.h"
-
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
diff --git a/chromium/v8/src/inspector/value-mirror.cc b/chromium/v8/src/inspector/value-mirror.cc
index 78078f4c174..a89aca72090 100644
--- a/chromium/v8/src/inspector/value-mirror.cc
+++ b/chromium/v8/src/inspector/value-mirror.cc
@@ -7,6 +7,15 @@
#include <algorithm>
#include <cmath>
+#include "include/v8-container.h"
+#include "include/v8-date.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
+#include "include/v8-proxy.h"
+#include "include/v8-regexp.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/base/optional.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/v8-debugger.h"
@@ -786,7 +795,7 @@ class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
!mirror.value) {
return true;
}
- if (!mirror.isOwn) return true;
+ if (!mirror.isOwn && !mirror.isSynthetic) return true;
if (std::find(m_blocklist.begin(), m_blocklist.end(), mirror.name) !=
m_blocklist.end()) {
return true;
@@ -844,7 +853,7 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
: -1;
PreviewPropertyAccumulator accumulator(blocklist, skipIndex, nameLimit,
indexLimit, overflow, properties);
- return ValueMirror::getProperties(context, object, false, false,
+ return ValueMirror::getProperties(context, object, false, false, false,
&accumulator);
}
@@ -1178,6 +1187,7 @@ ValueMirror::~ValueMirror() = default;
bool ValueMirror::getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object,
bool ownProperties, bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator) {
v8::Isolate* isolate = context->GetIsolate();
v8::TryCatch tryCatch(isolate);
@@ -1201,7 +1211,8 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
}
}
- auto iterator = v8::debug::PropertyIterator::Create(context, object);
+ auto iterator = v8::debug::PropertyIterator::Create(context, object,
+ nonIndexedPropertiesOnly);
if (!iterator) {
CHECK(tryCatch.HasCaught());
return false;
@@ -1241,9 +1252,10 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
bool configurable = false;
bool isAccessorProperty = false;
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchAttributes(isolate);
if (!iterator->attributes().To(&attributes)) {
- exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ exceptionMirror =
+ ValueMirror::create(context, tryCatchAttributes.Exception());
} else {
if (iterator->is_native_accessor()) {
if (iterator->has_native_getter()) {
@@ -1257,10 +1269,11 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
configurable = !(attributes & v8::PropertyAttribute::DontDelete);
isAccessorProperty = getterMirror || setterMirror;
} else {
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchDescriptor(isolate);
v8::debug::PropertyDescriptor descriptor;
if (!iterator->descriptor().To(&descriptor)) {
- exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ exceptionMirror =
+ ValueMirror::create(context, tryCatchDescriptor.Exception());
} else {
writable = descriptor.has_writable ? descriptor.writable : false;
enumerable =
@@ -1282,15 +1295,19 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
isAccessorProperty = getterMirror || setterMirror;
if (name != "__proto__" && !getterFunction.IsEmpty() &&
getterFunction->ScriptId() == v8::UnboundScript::kNoScriptId) {
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchFunction(isolate);
v8::Local<v8::Value> value;
if (v8::debug::CallFunctionOn(context, getterFunction, object, 0,
nullptr, true)
.ToLocal(&value)) {
- valueMirror = ValueMirror::create(context, value);
- isOwn = true;
- setterMirror = nullptr;
- getterMirror = nullptr;
+ if (value->IsPromise() &&
+ value.As<v8::Promise>()->State() == v8::Promise::kRejected) {
+ value.As<v8::Promise>()->MarkAsHandled();
+ } else {
+ valueMirror = ValueMirror::create(context, value);
+ setterMirror = nullptr;
+ getterMirror = nullptr;
+ }
}
}
}
@@ -1303,6 +1320,7 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
enumerable,
isOwn,
iterator->is_array_index(),
+ isAccessorProperty && valueMirror,
std::move(valueMirror),
std::move(getterMirror),
std::move(setterMirror),
@@ -1441,10 +1459,10 @@ String16 descriptionForNode(v8::Local<v8::Context> context,
}
}
if (!description.length()) {
- v8::Local<v8::Value> value;
+ v8::Local<v8::Value> constructor;
if (!object->Get(context, toV8String(isolate, "constructor"))
- .ToLocal(&value) ||
- !value->IsObject()) {
+ .ToLocal(&constructor) ||
+ !constructor->IsObject()) {
return String16();
}
if (!value.As<v8::Object>()
diff --git a/chromium/v8/src/inspector/value-mirror.h b/chromium/v8/src/inspector/value-mirror.h
index 88b4ad27117..721695e74d5 100644
--- a/chromium/v8/src/inspector/value-mirror.h
+++ b/chromium/v8/src/inspector/value-mirror.h
@@ -8,7 +8,7 @@
#include <memory>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/protocol/Runtime.h"
@@ -38,6 +38,7 @@ struct PropertyMirror {
bool enumerable;
bool isOwn;
bool isIndex;
+ bool isSynthetic;
std::unique_ptr<ValueMirror> value;
std::unique_ptr<ValueMirror> getter;
std::unique_ptr<ValueMirror> setter;
@@ -74,6 +75,7 @@ class ValueMirror {
static bool getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object, bool ownProperties,
bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator);
static void getInternalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Object> object,
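
ValueMirror::getProperties() and Runtime.getProperties gain a nonIndexedPropertiesOnly flag that is forwarded straight into v8::debug::PropertyIterator::Create(), so array-index properties can be skipped during iteration rather than filtered out afterwards. A sketch of a caller inside namespace v8_inspector that only wants named properties (the wrapper name collectNamedProperties is hypothetical):

    bool collectNamedProperties(v8::Local<v8::Context> context,
                                v8::Local<v8::Object> object,
                                ValueMirror::PropertyAccumulator* accumulator) {
      return ValueMirror::getProperties(context, object,
                                        /* ownProperties */ true,
                                        /* accessorPropertiesOnly */ false,
                                        /* nonIndexedPropertiesOnly */ true,
                                        accumulator);
    }
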
diff --git a/chromium/v8/src/interpreter/OWNERS b/chromium/v8/src/interpreter/OWNERS
index 481caea50b1..e61606034b7 100644
--- a/chromium/v8/src/interpreter/OWNERS
+++ b/chromium/v8/src/interpreter/OWNERS
@@ -1,3 +1,2 @@
leszeks@chromium.org
-mythria@chromium.org
-rmcilroy@chromium.org
+syg@chromium.org
diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc
index f78330bea1d..f82a71202c0 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.cc
+++ b/chromium/v8/src/interpreter/bytecode-generator.cc
@@ -8,6 +8,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-extension.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
@@ -1669,7 +1670,7 @@ void BytecodeGenerator::VisitModuleDeclarations(Declaration::List* decls) {
top_level_builder()->record_module_variable_declaration();
}
} else {
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
Visit(decl);
}
}
@@ -2525,7 +2526,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
const AstRawString* class_name =
expr->scope()->class_variable() != nullptr
? expr->scope()->class_variable()->raw_name()
- : ast_string_constants()->empty_string();
+ : ast_string_constants()->anonymous_string();
builder()
->LoadLiteral(class_name)
.StoreAccumulatorInRegister(brand)
@@ -3004,96 +3005,104 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
BuildCreateObjectLiteral(literal, flags, entry);
}
- // Store computed values into the literal.
- AccessorTable<ObjectLiteral::Property> accessor_table(zone());
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
- if (!clone_object_spread && property->IsCompileTimeValue()) continue;
-
- RegisterAllocationScope inner_register_scope(this);
- Literal* key = property->key()->AsLiteral();
- switch (property->kind()) {
- case ObjectLiteral::Property::SPREAD:
- UNREACHABLE();
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(clone_object_spread || !property->value()->IsCompileTimeValue());
- V8_FALLTHROUGH;
- case ObjectLiteral::Property::COMPUTED: {
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->IsStringLiteral()) {
- DCHECK(key->IsPropertyName());
- object_literal_context_scope.SetEnteredIf(
- property->value()->IsConciseMethodDefinition());
- if (property->emit_store()) {
- builder()->SetExpressionPosition(property->value());
- VisitForAccumulatorValue(property->value());
- FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
- builder()->StoreNamedOwnProperty(literal, key->AsRawPropertyName(),
- feedback_index(slot));
+ // If we used CloneObject for the first-element-is-spread case, we already
+ // copied accessors. Therefore skip the static initialization and treat all
+ // properties after the spread as dynamic.
+ // TODO(v8:9888): Use new Define ICs instead of Set ICs in the clone object
+ // spread case.
+ if (!clone_object_spread) {
+ // Store computed values into the literal.
+ AccessorTable<ObjectLiteral::Property> accessor_table(zone());
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property =
+ expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
+ if (property->IsCompileTimeValue()) continue;
+
+ RegisterAllocationScope inner_register_scope(this);
+ Literal* key = property->key()->AsLiteral();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!property->value()->IsCompileTimeValue());
+ V8_FALLTHROUGH;
+ case ObjectLiteral::Property::COMPUTED: {
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
+ object_literal_context_scope.SetEnteredIf(
+ property->value()->IsConciseMethodDefinition());
+ if (property->emit_store()) {
+ builder()->SetExpressionPosition(property->value());
+ VisitForAccumulatorValue(property->value());
+ FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
+ builder()->StoreNamedOwnProperty(
+ literal, key->AsRawPropertyName(), feedback_index(slot));
+ } else {
+ builder()->SetExpressionPosition(property->value());
+ VisitForEffect(property->value());
+ }
} else {
+ RegisterList args = register_allocator()->NewRegisterList(3);
+
+ builder()->MoveRegister(literal, args[0]);
+ builder()->SetExpressionPosition(property->key());
+ VisitForRegisterValue(property->key(), args[1]);
+
+ object_literal_context_scope.SetEnteredIf(
+ property->value()->IsConciseMethodDefinition());
builder()->SetExpressionPosition(property->value());
- VisitForEffect(property->value());
+ VisitForRegisterValue(property->value(), args[2]);
+ if (property->emit_store()) {
+ builder()->CallRuntime(Runtime::kSetKeyedProperty, args);
+ }
}
- } else {
- RegisterList args = register_allocator()->NewRegisterList(3);
-
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // __proto__:null is handled by CreateObjectLiteral.
+ if (property->IsNullPrototype()) break;
+ DCHECK(property->emit_store());
+ DCHECK(!property->NeedsSetFunctionName());
+ RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
- builder()->SetExpressionPosition(property->key());
- VisitForRegisterValue(property->key(), args[1]);
-
- object_literal_context_scope.SetEnteredIf(
- property->value()->IsConciseMethodDefinition());
+ object_literal_context_scope.SetEnteredIf(false);
builder()->SetExpressionPosition(property->value());
- VisitForRegisterValue(property->value(), args[2]);
+ VisitForRegisterValue(property->value(), args[1]);
+ builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- builder()->CallRuntime(Runtime::kSetKeyedProperty, args);
+ accessor_table.LookupOrInsert(key)->getter = property;
}
- }
- break;
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // __proto__:null is handled by CreateObjectLiteral.
- if (property->IsNullPrototype()) break;
- DCHECK(property->emit_store());
- DCHECK(!property->NeedsSetFunctionName());
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()->MoveRegister(literal, args[0]);
- object_literal_context_scope.SetEnteredIf(false);
- builder()->SetExpressionPosition(property->value());
- VisitForRegisterValue(property->value(), args[1]);
- builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
- break;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store()) {
+ accessor_table.LookupOrInsert(key)->setter = property;
+ }
+ break;
}
- case ObjectLiteral::Property::GETTER:
- if (property->emit_store()) {
- accessor_table.LookupOrInsert(key)->getter = property;
- }
- break;
- case ObjectLiteral::Property::SETTER:
- if (property->emit_store()) {
- accessor_table.LookupOrInsert(key)->setter = property;
- }
- break;
}
- }
- // Define accessors, using only a single call to the runtime for each pair of
- // corresponding getters and setters.
- object_literal_context_scope.SetEnteredIf(true);
- for (auto accessors : accessor_table.ordered_accessors()) {
- RegisterAllocationScope inner_register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(5);
- builder()->MoveRegister(literal, args[0]);
- VisitForRegisterValue(accessors.first, args[1]);
- VisitLiteralAccessor(accessors.second->getter, args[2]);
- VisitLiteralAccessor(accessors.second->setter, args[3]);
- builder()
- ->LoadLiteral(Smi::FromInt(NONE))
- .StoreAccumulatorInRegister(args[4])
- .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, args);
+ // Define accessors, using only a single call to the runtime for each pair
+ // of corresponding getters and setters.
+ object_literal_context_scope.SetEnteredIf(true);
+ for (auto accessors : accessor_table.ordered_accessors()) {
+ RegisterAllocationScope inner_register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(5);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(accessors.first, args[1]);
+ VisitLiteralAccessor(accessors.second->getter, args[2]);
+ VisitLiteralAccessor(accessors.second->setter, args[3]);
+ builder()
+ ->LoadLiteral(Smi::FromInt(NONE))
+ .StoreAccumulatorInRegister(args[4])
+ .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, args);
+ }
}
// Object literals have two parts. The "static" part on the left contains no
@@ -3647,8 +3656,7 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::UNALLOCATED: {
- FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
break;
}
case VariableLocation::CONTEXT: {
@@ -3737,9 +3745,7 @@ void BytecodeGenerator::BuildVariableAssignment(
if (mode == VariableMode::kConst) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
} else {
- FeedbackSlot slot =
- GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
}
}
break;
@@ -3772,6 +3778,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
}
}
+void BytecodeGenerator::BuildStoreGlobal(Variable* variable) {
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+
+ FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
// static
BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
@@ -3909,7 +3930,7 @@ void BytecodeGenerator::BuildFinalizeIteration(
ToBooleanMode::kConvertToBoolean, iterator_is_done.New());
{
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
BuildTryCatch(
// try {
// let method = iterator.return
@@ -4208,7 +4229,7 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
void BytecodeGenerator::BuildDestructuringObjectAssignment(
ObjectLiteral* pattern, Token::Value op,
LookupHoistingMode lookup_hoisting_mode) {
- RegisterAllocationScope scope(this);
+ RegisterAllocationScope register_scope(this);
// Store the assignment value in a register.
Register value;
@@ -4251,7 +4272,7 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
int i = 0;
for (ObjectLiteralProperty* pattern_property : *pattern->properties()) {
- RegisterAllocationScope scope(this);
+ RegisterAllocationScope inner_register_scope(this);
// The key of the pattern becomes the key into the RHS value, and the value
// of the pattern becomes the target of the assignment.
@@ -4348,12 +4369,16 @@ void BytecodeGenerator::BuildAssignment(
// Assign the value to the LHS.
switch (lhs_data.assign_type()) {
case NON_PROPERTY: {
- if (ObjectLiteral* pattern = lhs_data.expr()->AsObjectLiteral()) {
+ if (ObjectLiteral* pattern_as_object =
+ lhs_data.expr()->AsObjectLiteral()) {
// Split object literals into destructuring.
- BuildDestructuringObjectAssignment(pattern, op, lookup_hoisting_mode);
- } else if (ArrayLiteral* pattern = lhs_data.expr()->AsArrayLiteral()) {
+ BuildDestructuringObjectAssignment(pattern_as_object, op,
+ lookup_hoisting_mode);
+ } else if (ArrayLiteral* pattern_as_array =
+ lhs_data.expr()->AsArrayLiteral()) {
// Split array literals into destructuring.
- BuildDestructuringArrayAssignment(pattern, op, lookup_hoisting_mode);
+ BuildDestructuringArrayAssignment(pattern_as_array, op,
+ lookup_hoisting_mode);
} else {
DCHECK(lhs_data.expr()->IsVariableProxy());
VariableProxy* proxy = lhs_data.expr()->AsVariableProxy();
@@ -4836,7 +4861,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kNormal) {
builder()->LoadAccumulatorWithRegister(output);
} else {
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
DCHECK_EQ(iterator_type, IteratorType::kAsync);
// If generatorKind is async, perform AsyncGeneratorYield(output.value),
// which will await `output.value` before resolving the current
@@ -6291,7 +6316,7 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
builder()->JumpIfJSReceiver(done.New());
{
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
Register return_result = register_allocator()->NewRegister();
builder()
->StoreAccumulatorInRegister(return_result)
diff --git a/chromium/v8/src/interpreter/bytecode-generator.h b/chromium/v8/src/interpreter/bytecode-generator.h
index 01f4b2a5b6b..d3cc86acf5f 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.h
+++ b/chromium/v8/src/interpreter/bytecode-generator.h
@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name);
void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
+ void BuildStoreGlobal(Variable* variable);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = TypeofMode::kNotInside);
diff --git a/chromium/v8/src/interpreter/bytecodes.h b/chromium/v8/src/interpreter/bytecodes.h
index d938aff5a5e..61734b90444 100644
--- a/chromium/v8/src/interpreter/bytecodes.h
+++ b/chromium/v8/src/interpreter/bytecodes.h
@@ -106,7 +106,7 @@ namespace interpreter {
OperandType::kIdx) \
V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx, \
+ V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
OperandType::kIdx) \
\
/* Context operations */ \
@@ -393,7 +393,7 @@ namespace interpreter {
\
/* Complex flow control For..in */ \
V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
- V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator, \
+ V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kRegOutTriple, OperandType::kIdx) \
V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \
diff --git a/chromium/v8/src/interpreter/interpreter-assembler.cc b/chromium/v8/src/interpreter/interpreter-assembler.cc
index c6d6e44a2f0..cba90c7893e 100644
--- a/chromium/v8/src/interpreter/interpreter-assembler.cc
+++ b/chromium/v8/src/interpreter/interpreter-assembler.cc
@@ -157,7 +157,7 @@ TNode<Object> InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
implicit_register_use_ =
implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
- return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
+ return GetAccumulatorUnchecked();
}
void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
@@ -204,8 +204,8 @@ TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
TNode<IntPtrT> reg_index) {
- return Signed(WordPoisonOnSpeculation(
- IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
+ return Signed(
+ IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
@@ -218,8 +218,7 @@ TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
return LoadFullTagged(GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index),
- LoadSensitivity::kCritical);
+ RegisterFrameOffset(reg_index));
}
TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
@@ -242,16 +241,14 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
int operand_index) {
- return LoadRegister(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ return LoadRegister(BytecodeOperandReg(operand_index));
}
std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
LoadRegister(second_reg_index));
@@ -263,8 +260,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index + 1));
- TNode<IntPtrT> base_reg = RegisterLocation(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ TNode<IntPtrT> base_reg = RegisterLocation(BytecodeOperandReg(operand_index));
TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
return RegListNodePair(base_reg, reg_count);
}
@@ -272,13 +268,12 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
- // Location is already poisoned on speculation, so no need to poison here.
return LoadFullTagged(location);
}
TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
const RegListNodePair& reg_list, int index) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
// Register indexes are negative, so subtract index from base location to get
@@ -304,10 +299,10 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
implicit_register_use_ =
implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;
- CSA_ASSERT(
+ CSA_DCHECK(
this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
Bytecode::kFirstShortStar))));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
UintPtrLessThanOrEqual(
opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));
@@ -329,8 +324,7 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
- StoreRegister(value,
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ StoreRegister(value, BytecodeOperandReg(operand_index));
}
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
@@ -338,8 +332,7 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -350,8 +343,7 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -370,30 +362,27 @@ TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
}
TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Uint8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Int8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning) {
+ int relative_offset, MachineType result_type) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -430,9 +419,8 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
TNode<IntPtrT> offset =
IntPtrConstant(relative_offset + msb_offset + i * kStep);
TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
- bytes[i] =
- UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
- array_offset, needs_poisoning));
+ bytes[i] = UncheckedCast<Word32T>(
+ Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
}
// Pack LSB to MSB.
@@ -446,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
}
TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -456,16 +444,15 @@ TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint16(), needs_poisoning));
+ return UncheckedCast<Uint16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
}
}
TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -475,16 +462,15 @@ TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Int16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int16(), needs_poisoning));
+ return UncheckedCast<Int16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -493,16 +479,15 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint32(), needs_poisoning));
+ return UncheckedCast<Uint32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -511,43 +496,40 @@ TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Int32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int32(), needs_poisoning));
+ return UncheckedCast<Int32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandSignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandSignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandSignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandSignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
@@ -629,23 +611,22 @@ TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
}
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeUint32ToWord(
- BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeUnsignedOperand(operand_index, operand_size));
}
-TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
- int operand_index, LoadSensitivity needs_poisoning) {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeSignedOperand(operand_index, operand_size));
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
@@ -682,8 +663,7 @@ TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return UnsafeLoadFixedArrayElement(constant_pool,
- UncheckedCast<IntPtrT>(index), 0,
- LoadSensitivity::kCritical);
+ UncheckedCast<IntPtrT>(index), 0);
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
@@ -693,8 +673,7 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- TNode<UintPtrT> index =
- BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
+ TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
return LoadConstantPoolEntry(index);
}
@@ -733,14 +712,16 @@ void InterpreterAssembler::CallJSAndDispatch(
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
- TNode<Word32T> args_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // The receiver is implied, so it is not in the argument list.
- args_count = args.reg_count();
- } else {
- // Subtract the receiver from the argument count.
+ TNode<Word32T> args_count = args.reg_count();
+ const bool receiver_included =
+ receiver_mode != ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && !receiver_included) {
+ // Add receiver if we want to include it in argc and it isn't already.
+ args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && receiver_included) {
+ // Subtract receiver if we don't want to include it, but it is included.
TNode<Int32T> receiver_count = Int32Constant(1);
- args_count = Int32Sub(args.reg_count(), receiver_count);
+ args_count = Int32Sub(args_count, receiver_count);
}
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
@@ -768,6 +749,7 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
Callable callable = CodeFactory::Call(isolate());
TNode<Code> code_target = HeapConstant(callable.code());
+ arg_count = JSParameterCount(arg_count);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
@@ -812,8 +794,11 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
InterpreterPushArgsMode::kWithFinalSpread);
TNode<Code> code_target = HeapConstant(callable.code());
- TNode<Int32T> receiver_count = Int32Constant(1);
- TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
+ TNode<Word32T> args_count = args.reg_count();
+ if (!kJSArgcIncludesReceiver) {
+ TNode<Int32T> receiver_count = Int32Constant(1);
+ args_count = Int32Sub(args_count, receiver_count);
+ }
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
function);
@@ -832,6 +817,7 @@ TNode<Object> InterpreterAssembler::Construct(
Label return_result(this), construct_generic(this),
construct_array(this, &var_site);
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
slot_id, UpdateFeedbackMode::kOptionalFeedback,
&construct_generic, &construct_array, &var_site);
@@ -843,7 +829,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -856,7 +842,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, var_site.value());
Goto(&return_result);
}
@@ -982,7 +968,8 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
- return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
+ return CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
}
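
The call and construct hunks above all normalize the pushed argument count to the build-time kJSArgcIncludesReceiver convention before tail-calling the stub: add kJSArgcReceiverSlots when argc must include an implicit receiver, subtract one when it must not but the register list already contains it. Condensed, the adjustment used on the call paths (as it sits inside an InterpreterAssembler member, using the CSA helpers from the hunks above) is:

      TNode<Word32T> args_count = args.reg_count();
      const bool receiver_included =
          receiver_mode != ConvertReceiverMode::kNullOrUndefined;
      if (kJSArgcIncludesReceiver && !receiver_included) {
        // The stub counts the receiver, but the register list omits it.
        args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
      } else if (!kJSArgcIncludesReceiver && receiver_included) {
        // The stub does not count the receiver, but the list includes it.
        args_count = Int32Sub(args_count, Int32Constant(1));
      }
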
@@ -1026,7 +1013,7 @@ void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
// Assert that the weight is positive (negative weights should be implemented
// as backward updates).
- CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
+ CSA_DCHECK(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
Label load_budget_from_bytecode(this), load_budget_done(this);
TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
@@ -1224,13 +1211,9 @@ void InterpreterAssembler::DispatchToBytecode(
void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
- // Propagate speculation poisoning.
- TNode<RawPtrT> poisoned_handler_entry =
- UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
- TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
- poisoned_handler_entry, GetAccumulatorUnchecked(),
- bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTablePointer());
+ TailCallBytecodeDispatch(
+ InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1325,7 +1308,7 @@ void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
Label baseline(this);
- GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+ GotoIf(InstanceTypeEqual(data_type, CODET_TYPE), &baseline);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
CallStub(callable, context);
@@ -1382,7 +1365,7 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return true;
#else
#error "Unknown Architecture"
@@ -1416,7 +1399,7 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
- CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
register_count);
@@ -1488,7 +1471,7 @@ TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
- CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
register_count);
diff --git a/chromium/v8/src/interpreter/interpreter-assembler.h b/chromium/v8/src/interpreter/interpreter-assembler.h
index bf4641200bb..d89c05e2d38 100644
--- a/chromium/v8/src/interpreter/interpreter-assembler.h
+++ b/chromium/v8/src/interpreter/interpreter-assembler.h
@@ -308,51 +308,32 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  // The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- TNode<Word32T> BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Word32T> BytecodeOperandReadUnaligned(int relative_offset,
+ MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand.
- TNode<Uint8T> BytecodeOperandUnsignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int8T> BytecodeOperandSignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint16T> BytecodeOperandUnsignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int16T> BytecodeOperandSignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeOperandUnsignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int32T> BytecodeOperandSignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Uint8T> BytecodeOperandUnsignedByte(int operand_index);
+ TNode<Int8T> BytecodeOperandSignedByte(int operand_index);
+ TNode<Uint16T> BytecodeOperandUnsignedShort(int operand_index);
+ TNode<Int16T> BytecodeOperandSignedShort(int operand_index);
+ TNode<Uint32T> BytecodeOperandUnsignedQuad(int operand_index);
+ TNode<Int32T> BytecodeOperandSignedQuad(int operand_index);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
- TNode<Int32T> BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Int32T> BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size);
+ TNode<Uint32T> BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size);
// Returns the word-size sign-extended register index for bytecode operand
- // |operand_index| in the current bytecode. Value is not poisoned on
- // speculation since the value loaded from the register is poisoned instead.
- TNode<IntPtrT> BytecodeOperandReg(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode.
+ TNode<IntPtrT> BytecodeOperandReg(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
- // |operand_index| in the current bytecode for use when loading a .
- TNode<UintPtrT> BytecodeOperandConstantPoolIdx(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode for use when loading a constant
+ // pool element.
+ TNode<UintPtrT> BytecodeOperandConstantPoolIdx(int operand_index);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
diff --git a/chromium/v8/src/interpreter/interpreter-generator.cc b/chromium/v8/src/interpreter/interpreter-generator.cc
index e010ab2f640..5fd642fee52 100644
--- a/chromium/v8/src/interpreter/interpreter-generator.cc
+++ b/chromium/v8/src/interpreter/interpreter-generator.cc
@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
- CallBuiltin(Builtin::kStoreGlobalIC, context, name, value, slot,
- maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
+ value, slot, maybe_vector);
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
Dispatch();
}
@@ -473,7 +479,7 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
BIND(&strict);
{
- CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
+ CSA_DCHECK(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
bytecode_flags));
var_result =
CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value);
@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
+ TNode<Object> result =
+ CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
};
@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kKeyedStoreIC, context, object, name, value,
- slot, maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
+ name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array,
- index, value, slot, feedback_vector);
+ TNode<Object> result =
+ CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
+ slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -1264,7 +1269,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
}
BIND(&if_false);
{
- CSA_ASSERT(this, TaggedEqual(value, false_value));
+ CSA_DCHECK(this, TaggedEqual(value, false_value));
result = true_value;
Goto(&end);
}
@@ -1767,11 +1772,11 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Label if_true(this), if_false(this), end(this);
- // We juse use the final label as the default and properly CSA_ASSERT
+ // We just use the final label as the default and properly CSA_DCHECK
// that the {literal_flag} is valid here; this significantly improves
// the generated code (compared to having a default label that aborts).
unsigned const num_cases = arraysize(cases);
- CSA_ASSERT(this, Uint32LessThan(literal_flag, Int32Constant(num_cases)));
+ CSA_DCHECK(this, Uint32LessThan(literal_flag, Int32Constant(num_cases)));
Switch(literal_flag, labels[num_cases - 1], cases, labels, num_cases - 1);
BIND(&if_number);
@@ -1888,7 +1893,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -1900,7 +1905,7 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -1912,7 +1917,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -1924,7 +1929,7 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -2195,7 +2200,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// TNode<IntPtrT> acc_intptr = TryTaggedToInt32AsIntPtr(acc, &fall_through);
// TNode<IntPtrT> case_value = IntPtrSub(acc_intptr, case_value_base);
- CSA_ASSERT(this, TaggedIsSmi(acc));
+ CSA_DCHECK(this, TaggedIsSmi(acc));
TNode<IntPtrT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length, UpdateFeedbackMode::kOptionalFeedback);
+ // The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
+ // alive too long and instead set it to cache_array to match the first return
+ // value of Builtin::kForInPrepare.
+ SetAccumulator(cache_array);
+
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
@@ -2970,8 +2980,8 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ExportParametersAndRegisterFile(array, registers, formal_parameter_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
@@ -3014,17 +3024,17 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
SetContext(context);
TNode<UintPtrT> table_start = BytecodeOperandIdx(1);
- // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't
+ // TODO(leszeks): table_length is only used for a CSA_DCHECK, we don't
// actually need it otherwise.
TNode<UintPtrT> table_length = BytecodeOperandUImmWord(2);
// The state must be a Smi.
- CSA_ASSERT(this, TaggedIsSmi(state));
+ CSA_DCHECK(this, TaggedIsSmi(state));
TNode<IntPtrT> case_value = SmiUntag(state);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrLessThan(case_value, table_length));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrLessThan(case_value, table_length));
USE(table_length);
TNode<WordT> entry = IntPtrAdd(table_start, case_value);
@@ -3046,8 +3056,8 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ImportRegisterFile(
CAST(LoadObjectField(generator,
@@ -3074,9 +3084,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{},
CodeKind::BYTECODE_HANDLER, debug_name,
- FLAG_untrusted_code_mitigations
- ? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
builtin);
switch (bytecode) {
diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc
index a874954157c..88d7706c726 100644
--- a/chromium/v8/src/interpreter/interpreter.cc
+++ b/chromium/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/execution/local-isolate.h"
#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
@@ -389,11 +390,9 @@ uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
to_index];
}
-Local<v8::Object> Interpreter::GetDispatchCountersObject() {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- Local<v8::Context> context = isolate->GetCurrentContext();
-
- Local<v8::Object> counters_map = v8::Object::New(isolate);
+Handle<JSObject> Interpreter::GetDispatchCountersObject() {
+ Handle<JSObject> counters_map =
+ isolate_->factory()->NewJSObjectWithNullProto();
// Output is a JSON-encoded object of objects.
//
@@ -408,30 +407,23 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
Bytecode from_bytecode = Bytecodes::FromByte(from_index);
- Local<v8::Object> counters_row = v8::Object::New(isolate);
+ Handle<JSObject> counters_row =
+ isolate_->factory()->NewJSObjectWithNullProto();
for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
Bytecode to_bytecode = Bytecodes::FromByte(to_index);
uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
if (counter > 0) {
- std::string to_name = Bytecodes::ToString(to_bytecode);
- Local<v8::String> to_name_object =
- v8::String::NewFromUtf8(isolate, to_name.c_str()).ToLocalChecked();
- Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
- CHECK(counters_row
- ->DefineOwnProperty(context, to_name_object, counter_object)
- .IsJust());
+ Handle<Object> value = isolate_->factory()->NewNumberFromSize(counter);
+ JSObject::AddProperty(isolate_, counters_row,
+ Bytecodes::ToString(to_bytecode), value, NONE);
}
}
- std::string from_name = Bytecodes::ToString(from_bytecode);
- Local<v8::String> from_name_object =
- v8::String::NewFromUtf8(isolate, from_name.c_str()).ToLocalChecked();
-
- CHECK(
- counters_map->DefineOwnProperty(context, from_name_object, counters_row)
- .IsJust());
+ JSObject::AddProperty(isolate_, counters_map,
+ Bytecodes::ToString(from_bytecode), counters_row,
+ NONE);
}
return counters_map;
diff --git a/chromium/v8/src/interpreter/interpreter.h b/chromium/v8/src/interpreter/interpreter.h
index 95a3c4ef79e..9daa886e659 100644
--- a/chromium/v8/src/interpreter/interpreter.h
+++ b/chromium/v8/src/interpreter/interpreter.h
@@ -72,7 +72,7 @@ class Interpreter {
// Disassembler support.
V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
- V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
+ V8_EXPORT_PRIVATE Handle<JSObject> GetDispatchCountersObject();
void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
diff --git a/chromium/v8/src/json/json-parser.cc b/chromium/v8/src/json/json-parser.cc
index 1c76143d596..89a3850437e 100644
--- a/chromium/v8/src/json/json-parser.cc
+++ b/chromium/v8/src/json/json-parser.cc
@@ -5,6 +5,7 @@
#include "src/json/json-parser.h"
#include "src/base/strings.h"
+#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/numbers/conversions.h"
@@ -210,19 +211,21 @@ JsonParser<Char>::JsonParser(Isolate* isolate, Handle<String> source)
original_source_(source) {
size_t start = 0;
size_t length = source->length();
- if (source->IsSlicedString()) {
+ PtrComprCageBase cage_base(isolate);
+ if (source->IsSlicedString(cage_base)) {
SlicedString string = SlicedString::cast(*source);
start = string.offset();
- String parent = string.parent();
- if (parent.IsThinString()) parent = ThinString::cast(parent).actual();
+ String parent = string.parent(cage_base);
+ if (parent.IsThinString(cage_base))
+ parent = ThinString::cast(parent).actual(cage_base);
source_ = handle(parent, isolate);
} else {
source_ = String::Flatten(isolate, source);
}
- if (StringShape(*source_).IsExternal()) {
- chars_ =
- static_cast<const Char*>(SeqExternalString::cast(*source_).GetChars());
+ if (StringShape(*source_, cage_base).IsExternal()) {
+ chars_ = static_cast<const Char*>(
+ SeqExternalString::cast(*source_).GetChars(cage_base));
chars_may_relocate_ = false;
} else {
DisallowGarbageCollection no_gc;
diff --git a/chromium/v8/src/json/json-parser.h b/chromium/v8/src/json/json-parser.h
index 03e75375127..4819f9d64e7 100644
--- a/chromium/v8/src/json/json-parser.h
+++ b/chromium/v8/src/json/json-parser.h
@@ -5,6 +5,7 @@
#ifndef V8_JSON_JSON_PARSER_H_
#define V8_JSON_JSON_PARSER_H_
+#include "include/v8-callbacks.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/chromium/v8/src/json/json-stringifier.cc b/chromium/v8/src/json/json-stringifier.cc
index c86ab12a653..beebbc3fbf8 100644
--- a/chromium/v8/src/json/json-stringifier.cc
+++ b/chromium/v8/src/json/json-stringifier.cc
@@ -315,9 +315,9 @@ bool JsonStringifier::InitializeGap(Handle<Object> gap) {
gap_[gap_length] = '\0';
}
} else if (gap->IsNumber()) {
- int num_value = DoubleToInt32(gap->Number());
- if (num_value > 0) {
- int gap_length = std::min(num_value, 10);
+ double value = std::min(gap->Number(), 10.0);
+ if (value > 0) {
+ int gap_length = DoubleToInt32(value);
gap_ = NewArray<base::uc16>(gap_length + 1);
for (int i = 0; i < gap_length; i++) gap_[i] = ' ';
gap_[gap_length] = '\0';
@@ -782,7 +782,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
map->instance_descriptors(isolate_).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
- if (details.location() == kField && *map == object->map()) {
+ if (details.location() == PropertyLocation::kField &&
+ *map == object->map()) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
property = JSObject::FastPropertyAt(object, details.representation(),
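
The json-stringifier.cc change above clamps a numeric gap to 10 while it is still a double and only then converts it to an int, instead of converting first and clamping afterwards. A minimal standalone sketch of why the ordering matters (plain C++, not V8 code; ToInt32Truncating is an invented stand-in for a double-to-int32 conversion that, like ECMAScript ToInt32, wraps out-of-range values modulo 2^32):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the engine's double-to-int32 conversion: go via
// int64 and truncate to 32 bits, so out-of-range doubles wrap around.
int32_t ToInt32Truncating(double d) {
  return static_cast<int32_t>(static_cast<int64_t>(d));
}

int main() {
  double gap = 4294967297.0;  // 2^32 + 1: a silly but legal numeric gap
  // Old order: convert first, then clamp -- the wrapped value 1 slips through.
  int old_result = std::min(ToInt32Truncating(gap), 10);
  // New order: clamp the double first, then convert -- always within [0, 10].
  int new_result = ToInt32Truncating(std::min(gap, 10.0));
  std::cout << old_result << " vs " << new_result << "\n";  // prints "1 vs 10"
  return 0;
}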
diff --git a/chromium/v8/src/libplatform/default-platform.cc b/chromium/v8/src/libplatform/default-platform.cc
index 66057e1a398..1cbc01193dc 100644
--- a/chromium/v8/src/libplatform/default-platform.cc
+++ b/chromium/v8/src/libplatform/default-platform.cc
@@ -8,6 +8,7 @@
#include <queue>
#include "include/libplatform/libplatform.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
diff --git a/chromium/v8/src/libsampler/sampler.cc b/chromium/v8/src/libsampler/sampler.cc
index 49c8406533d..fb94972b85a 100644
--- a/chromium/v8/src/libsampler/sampler.cc
+++ b/chromium/v8/src/libsampler/sampler.cc
@@ -4,6 +4,9 @@
#include "src/libsampler/sampler.h"
+#include "include/v8-isolate.h"
+#include "include/v8-unwinder.h"
+
#ifdef USE_SIGNALS
#include <errno.h>
@@ -412,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_LOONG64
+ state->pc = reinterpret_cast<void*>(mcontext.__pc);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[3]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[22]);
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
diff --git a/chromium/v8/src/libsampler/sampler.h b/chromium/v8/src/libsampler/sampler.h
index 35bcf23546c..98c06061513 100644
--- a/chromium/v8/src/libsampler/sampler.h
+++ b/chromium/v8/src/libsampler/sampler.h
@@ -8,8 +8,8 @@
#include <atomic>
#include <memory>
#include <unordered_map>
+#include <vector>
-#include "include/v8.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -18,6 +18,10 @@
#endif
namespace v8 {
+
+class Isolate;
+struct RegisterState;
+
namespace sampler {
// ----------------------------------------------------------------------------
diff --git a/chromium/v8/src/logging/counters-definitions.h b/chromium/v8/src/logging/counters-definitions.h
index 0fcb2e15af7..2ed7a0758fa 100644
--- a/chromium/v8/src/logging/counters-definitions.h
+++ b/chromium/v8/src/logging/counters-definitions.h
@@ -102,7 +102,9 @@ namespace internal {
HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200) \
/* Backtracks observed in a single regexp interpreter execution */ \
/* The maximum of 100M backtracks takes roughly 2 seconds on my machine. */ \
- HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50)
+ HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50) \
+ /* See the CagedMemoryAllocationOutcome enum in backing-store.cc */ \
+ HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2, 3)
#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
diff --git a/chromium/v8/src/logging/counters.h b/chromium/v8/src/logging/counters.h
index 3a2527f49cc..08e35352cfd 100644
--- a/chromium/v8/src/logging/counters.h
+++ b/chromium/v8/src/logging/counters.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/base/atomic-utils.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
diff --git a/chromium/v8/src/logging/log-utils.cc b/chromium/v8/src/logging/log-utils.cc
index 67a52a58738..69567b53d9e 100644
--- a/chromium/v8/src/logging/log-utils.cc
+++ b/chromium/v8/src/logging/log-utils.cc
@@ -12,7 +12,10 @@
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/strings/string-stream.h"
#include "src/utils/version.h"
@@ -108,10 +111,12 @@ void Log::MessageBuilder::AppendString(String str,
if (str.is_null()) return;
DisallowGarbageCollection no_gc; // Ensure string stays valid.
+ PtrComprCageBase cage_base = GetPtrComprCageBase(str);
+ SharedStringAccessGuardIfNeeded access_guard(str);
int length = str.length();
if (length_limit) length = std::min(length, *length_limit);
for (int i = 0; i < length; i++) {
- uint16_t c = str.Get(i);
+ uint16_t c = str.Get(i, cage_base, access_guard);
if (c <= 0xFF) {
AppendCharacter(static_cast<char>(c));
} else {
diff --git a/chromium/v8/src/logging/log.cc b/chromium/v8/src/logging/log.cc
index 4f6aa856d77..5ef24c15356 100644
--- a/chromium/v8/src/logging/log.cc
+++ b/chromium/v8/src/logging/log.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <sstream>
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
@@ -102,7 +103,7 @@ static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
#if V8_ENABLE_WEBASSEMBLY
static const char* ComputeMarker(const wasm::WasmCode* code) {
switch (code->kind()) {
- case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kWasmFunction:
return code->is_liftoff() ? "" : "*";
default:
return "";
@@ -614,6 +615,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc64";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
+#elif V8_TARGET_ARCH_LOONG64
+ const char arch[] = "loong64";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
@@ -730,7 +733,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADDED;
- event.code_type = JitCodeEvent::JIT_CODE;
+ event.code_type = JitCodeEvent::WASM_CODE;
event.code_start = code->instructions().begin();
event.code_len = code->instructions().length();
event.name.str = name;
@@ -941,9 +944,10 @@ class Ticker : public sampler::Sampler {
void SampleStack(const v8::RegisterState& state) override {
if (!profiler_) return;
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
- perThreadData_->thread_id()) ||
- perThreadData_->thread_state() != nullptr))
+ if (v8::Locker::WasEverUsed() &&
+ (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr))
return;
TickSample sample;
sample.Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
@@ -1558,12 +1562,14 @@ void Logger::CodeLinePosInfoRecordEvent(Address code_start,
CodeLinePosEvent(*jit_logger_, code_start, iter, code_type);
}
-void Logger::CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+void Logger::WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table) {
if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::JIT_CODE);
+ CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::WASM_CODE);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
@@ -2217,12 +2223,11 @@ void ExistingCodeLogger::LogCompiledFunctions() {
Handle<AbstractCode>(
AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
}
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
LogExistingFunction(
- shared,
- Handle<AbstractCode>(
- AbstractCode::cast(shared->baseline_data().baseline_code()),
- isolate_));
+ shared, Handle<AbstractCode>(
+ AbstractCode::cast(shared->baseline_code(kAcquireLoad)),
+ isolate_));
}
if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
diff --git a/chromium/v8/src/logging/log.h b/chromium/v8/src/logging/log.h
index 612c2a2df7d..b9e7a75c20d 100644
--- a/chromium/v8/src/logging/log.h
+++ b/chromium/v8/src/logging/log.h
@@ -10,6 +10,7 @@
#include <set>
#include <string>
+#include "include/v8-callbacks.h"
#include "include/v8-profiler.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/execution/isolate.h"
@@ -241,8 +242,10 @@ class Logger : public CodeEventListener {
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table,
JitCodeEvent::CodeType code_type);
- void CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+ void WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table);
+#endif // V8_ENABLE_WEBASSEMBLY
void CodeNameEvent(Address addr, int pos, const char* code_name);
diff --git a/chromium/v8/src/logging/runtime-call-stats-scope.h b/chromium/v8/src/logging/runtime-call-stats-scope.h
index 1be12f06dab..6b3db25ae88 100644
--- a/chromium/v8/src/logging/runtime-call-stats-scope.h
+++ b/chromium/v8/src/logging/runtime-call-stats-scope.h
@@ -17,8 +17,10 @@ namespace internal {
#ifdef V8_RUNTIME_CALL_STATS
-#define RCS_SCOPE(...) \
- v8::internal::RuntimeCallTimerScope rcs_timer_scope(__VA_ARGS__)
+// Make the line number part of the scope's name to avoid -Wshadow warnings.
+#define RCS_SCOPE(...) \
+ v8::internal::RuntimeCallTimerScope CONCAT(rcs_timer_scope, \
+ __LINE__)(__VA_ARGS__)
RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallCounterId counter_id) {
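
The RCS_SCOPE change above folds __LINE__ into the generated variable name so that two scopes in the same function no longer declare identically named timers. A generic sketch of the two-level token-pasting idiom (macro and class names here are made up, not V8's CONCAT or RuntimeCallTimerScope):

#include <cstdio>

// Two macro levels are needed so __LINE__ expands to its number before the
// tokens are pasted together.
#define SKETCH_CONCAT_IMPL(a, b) a##b
#define SKETCH_CONCAT(a, b) SKETCH_CONCAT_IMPL(a, b)

struct ScopedTimer {
  explicit ScopedTimer(const char* name) { std::printf("enter %s\n", name); }
  ~ScopedTimer() { std::printf("leave\n"); }
};

// Each expansion declares a differently named local (timer_20, timer_22, ...),
// so an inner scope no longer shadows an outer one.
#define TIMER_SCOPE(name) ScopedTimer SKETCH_CONCAT(timer_, __LINE__)(name)

int main() {
  TIMER_SCOPE("outer");
  {
    TIMER_SCOPE("inner");  // distinct variable name, no -Wshadow warning
  }
  return 0;
}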
diff --git a/chromium/v8/src/logging/runtime-call-stats.cc b/chromium/v8/src/logging/runtime-call-stats.cc
index 86e3215f74e..a326c59c4c0 100644
--- a/chromium/v8/src/logging/runtime-call-stats.cc
+++ b/chromium/v8/src/logging/runtime-call-stats.cc
@@ -25,17 +25,17 @@ base::TimeTicks RuntimeCallTimer::NowCPUTime() {
class RuntimeCallStatEntries {
public:
void Print(std::ostream& os) {
- if (total_call_count == 0) return;
- std::sort(entries.rbegin(), entries.rend());
+ if (total_call_count_ == 0) return;
+ std::sort(entries_.rbegin(), entries_.rend());
os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
<< "Time" << std::setw(18) << "Count" << std::endl
<< std::string(88, '=') << std::endl;
- for (Entry& entry : entries) {
- entry.SetTotal(total_time, total_call_count);
+ for (Entry& entry : entries_) {
+ entry.SetTotal(total_time_, total_call_count_);
entry.Print(os);
}
os << std::string(88, '-') << std::endl;
- Entry("Total", total_time, total_call_count).Print(os);
+ Entry("Total", total_time_, total_call_count_).Print(os);
}
// By default, the compiler will usually inline this, which results in a large
@@ -43,10 +43,10 @@ class RuntimeCallStatEntries {
// instructions, and this function is invoked repeatedly by macros.
V8_NOINLINE void Add(RuntimeCallCounter* counter) {
if (counter->count() == 0) return;
- entries.push_back(
+ entries_.push_back(
Entry(counter->name(), counter->time(), counter->count()));
- total_time += counter->time();
- total_call_count += counter->count();
+ total_time_ += counter->time();
+ total_call_count_ += counter->count();
}
private:
@@ -94,9 +94,9 @@ class RuntimeCallStatEntries {
double count_percent_;
};
- uint64_t total_call_count = 0;
- base::TimeDelta total_time;
- std::vector<Entry> entries;
+ uint64_t total_call_count_ = 0;
+ base::TimeDelta total_time_;
+ std::vector<Entry> entries_;
};
void RuntimeCallCounter::Reset() {
@@ -260,17 +260,6 @@ void RuntimeCallStats::Print(std::ostream& os) {
entries.Print(os);
}
-void RuntimeCallStats::EnumerateCounters(
- debug::RuntimeCallCounterCallback callback) {
- if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Snapshot();
- }
- for (int i = 0; i < kNumberOfCounters; i++) {
- RuntimeCallCounter* counter = GetCounter(i);
- callback(counter->name(), counter->count(), counter->time());
- }
-}
-
void RuntimeCallStats::Reset() {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
diff --git a/chromium/v8/src/logging/runtime-call-stats.h b/chromium/v8/src/logging/runtime-call-stats.h
index 5b3284a0c9c..4e54e0ab716 100644
--- a/chromium/v8/src/logging/runtime-call-stats.h
+++ b/chromium/v8/src/logging/runtime-call-stats.h
@@ -5,8 +5,6 @@
#ifndef V8_LOGGING_RUNTIME_CALL_STATS_H_
#define V8_LOGGING_RUNTIME_CALL_STATS_H_
-#include "include/v8.h"
-
#ifdef V8_RUNTIME_CALL_STATS
#include "src/base/atomic-utils.h"
@@ -339,6 +337,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JSWasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
@@ -597,9 +596,6 @@ class RuntimeCallStats final {
V8_EXPORT_PRIVATE void Print();
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
- V8_EXPORT_PRIVATE void EnumerateCounters(
- debug::RuntimeCallCounterCallback callback);
-
ThreadId thread_id() const { return thread_id_; }
RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
diff --git a/chromium/v8/src/numbers/conversions.cc b/chromium/v8/src/numbers/conversions.cc
index 79497a791b8..a12a3f1c728 100644
--- a/chromium/v8/src/numbers/conversions.cc
+++ b/chromium/v8/src/numbers/conversions.cc
@@ -670,19 +670,19 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
- enum Sign { NONE, NEGATIVE, POSITIVE };
+ enum class Sign { kNone, kNegative, kPositive };
- Sign sign = NONE;
+ Sign sign = Sign::kNone;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
- sign = POSITIVE;
+ sign = Sign::kPositive;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
- sign = NEGATIVE;
+ sign = Sign::kNegative;
}
static const char kInfinityString[] = "Infinity";
@@ -696,20 +696,20 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
}
DCHECK_EQ(buffer_pos, 0);
- return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
+ return (sign == Sign::kNegative) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end || !isDigit(*current, 16) || sign != NONE) {
+ if (current == end || !isDigit(*current, 16) || sign != Sign::kNone) {
return JunkStringValue(); // "0x".
}
@@ -719,7 +719,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// It could be an explicit octal value.
} else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
++current;
- if (current == end || !isDigit(*current, 8) || sign != NONE) {
+ if (current == end || !isDigit(*current, 8) || sign != Sign::kNone) {
return JunkStringValue(); // "0o".
}
@@ -729,7 +729,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// It could be a binary value.
} else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
++current;
- if (current == end || !isBinaryDigit(*current) || sign != NONE) {
+ if (current == end || !isBinaryDigit(*current) || sign != Sign::kNone) {
return JunkStringValue(); // "0b".
}
@@ -740,7 +740,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
}
}
@@ -785,7 +785,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
exponent--; // Move this 0 into the exponent.
}
}
@@ -826,9 +826,9 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
return JunkStringValue();
}
}
- char sign = '+';
+ char exponent_sign = '+';
if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
+ exponent_sign = static_cast<char>(*current);
++current;
if (current == end) {
if (allow_trailing_junk) {
@@ -862,7 +862,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
++current;
} while (current != end && *current >= '0' && *current <= '9');
- exponent += (sign == '-' ? -num : num);
+ exponent += (exponent_sign == '-' ? -num : num);
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
@@ -874,7 +874,8 @@ parsing_done:
if (octal) {
return InternalStringToIntDouble<3>(buffer, buffer + buffer_pos,
- sign == NEGATIVE, allow_trailing_junk);
+ sign == Sign::kNegative,
+ allow_trailing_junk);
}
if (nonzero_digit_dropped) {
@@ -887,7 +888,7 @@ parsing_done:
double converted =
Strtod(base::Vector<const char>(buffer, buffer_pos), exponent);
- return (sign == NEGATIVE) ? -converted : converted;
+ return (sign == Sign::kNegative) ? -converted : converted;
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
@@ -1363,7 +1364,7 @@ char* DoubleToRadixCString(double value, int radix) {
}
char c = buffer[fraction_cursor];
// Reconstruct digit.
- int digit = c > '9' ? (c - 'a' + 10) : (c - '0');
+ digit = c > '9' ? (c - 'a' + 10) : (c - '0');
if (digit + 1 < radix) {
buffer[fraction_cursor++] = chars[digit + 1];
break;
@@ -1425,7 +1426,7 @@ base::Optional<double> TryStringToDouble(LocalIsolate* isolate,
const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
auto buffer = std::make_unique<base::uc16[]>(max_length_for_conversion);
SharedStringAccessGuardIfNeeded access_guard(isolate);
- String::WriteToFlat(*object, buffer.get(), 0, length, access_guard);
+ String::WriteToFlat(*object, buffer.get(), 0, length, isolate, access_guard);
base::Vector<const base::uc16> v(buffer.get(), length);
return StringToDouble(v, flags);
}
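
In the conversions.cc hunks above, the unscoped Sign enum becomes an enum class and the inner exponent-sign char is renamed, so enumerators and locals no longer share the enclosing scope. A self-contained sketch of the scoped-enum pattern the patch switches to (enumerator values mirrored from the diff, the surrounding code invented for illustration):

#include <iostream>

// Unscoped enums inject NONE/NEGATIVE/POSITIVE into the enclosing scope,
// where they are easy to collide with; a scoped enum keeps the names behind
// the type, which is what the patch adopts.
enum class Sign { kNone, kNegative, kPositive };

int main() {
  Sign sign = Sign::kNegative;
  double value = 1.5;
  std::cout << ((sign == Sign::kNegative) ? -value : value) << '\n';  // -1.5
  return 0;
}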
diff --git a/chromium/v8/src/objects/allocation-site-inl.h b/chromium/v8/src/objects/allocation-site-inl.h
index 9d17048958b..1fc6709a5e2 100644
--- a/chromium/v8/src/objects/allocation-site-inl.h
+++ b/chromium/v8/src/objects/allocation-site-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_ALLOCATION_SITE_INL_H_
#define V8_OBJECTS_ALLOCATION_SITE_INL_H_
-#include "src/objects/allocation-site.h"
-
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -30,8 +30,7 @@ ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
RELEASE_ACQUIRE_ACCESSORS(AllocationSite, transition_info_or_boilerplate,
Object, kTransitionInfoOrBoilerplateOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data,
- kPretenureDataOffset)
+RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
INT32_ACCESSORS(AllocationSite, pretenure_create_count,
kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode, kDependentCodeOffset)
@@ -73,7 +72,7 @@ void AllocationSite::Initialize() {
set_transition_info_or_boilerplate(Smi::zero());
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::zero());
- set_pretenure_data(0);
+ set_pretenure_data(0, kRelaxedStore);
set_pretenure_create_count(0);
set_dependent_code(
DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
@@ -139,36 +138,39 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
}
AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
- return PretenureDecisionBits::decode(pretenure_data());
+ return PretenureDecisionBits::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int32_t value = pretenure_data();
- set_pretenure_data(PretenureDecisionBits::update(value, decision));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(PretenureDecisionBits::update(value, decision),
+ kRelaxedStore);
}
bool AllocationSite::deopt_dependent_code() const {
- return DeoptDependentCodeBit::decode(pretenure_data());
+ return DeoptDependentCodeBit::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int32_t value = pretenure_data();
- set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(DeoptDependentCodeBit::update(value, deopt),
+ kRelaxedStore);
}
int AllocationSite::memento_found_count() const {
- return MementoFoundCountBits::decode(pretenure_data());
+ return MementoFoundCountBits::decode(pretenure_data(kRelaxedLoad));
}
inline void AllocationSite::set_memento_found_count(int count) {
- int32_t value = pretenure_data();
+ int32_t value = pretenure_data(kRelaxedLoad);
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
(Heap::kMinObjectSizeInTaggedWords * kTaggedSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK_LT(count, MementoFoundCountBits::kMax);
- set_pretenure_data(MementoFoundCountBits::update(value, count));
+ set_pretenure_data(MementoFoundCountBits::update(value, count),
+ kRelaxedStore);
}
int AllocationSite::memento_create_count() const {
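
The allocation-site changes above route every read and write of the packed pretenure_data word through relaxed-load/relaxed-store accessors, with bitfield helpers updating one field at a time. A rough standalone sketch of that load-modify-store pattern (the mask layout and helper below are invented for illustration and are not V8's BitField encoding):

#include <atomic>
#include <cstdint>
#include <iostream>

// Packed word read and written with relaxed atomics; helpers rewrite one
// field of the word at a time.
std::atomic<int32_t> pretenure_data{0};

constexpr int32_t kDecisionMask = 0x7;  // pretend the low 3 bits hold the decision

int32_t update_decision(int32_t word, int32_t decision) {
  return (word & ~kDecisionMask) | (decision & kDecisionMask);
}

int main() {
  int32_t value = pretenure_data.load(std::memory_order_relaxed);
  pretenure_data.store(update_decision(value, 2), std::memory_order_relaxed);
  std::cout << pretenure_data.load(std::memory_order_relaxed) << '\n';  // 2
  return 0;
}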
diff --git a/chromium/v8/src/objects/allocation-site.h b/chromium/v8/src/objects/allocation-site.h
index a069279c6e5..4d673b4caf9 100644
--- a/chromium/v8/src/objects/allocation-site.h
+++ b/chromium/v8/src/objects/allocation-site.h
@@ -51,7 +51,7 @@ class AllocationSite : public Struct {
DECL_ACCESSORS(nested_site, Object)
// Bitfield containing pretenuring information.
- DECL_INT32_ACCESSORS(pretenure_data)
+ DECL_RELAXED_INT32_ACCESSORS(pretenure_data)
DECL_INT32_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
diff --git a/chromium/v8/src/objects/api-callbacks.tq b/chromium/v8/src/objects/api-callbacks.tq
index cf94f743c4e..913dd58ea61 100644
--- a/chromium/v8/src/objects/api-callbacks.tq
+++ b/chromium/v8/src/objects/api-callbacks.tq
@@ -16,7 +16,6 @@ bitfield struct InterceptorInfoFlags extends uint31 {
has_no_side_effect: bool: 1 bit;
}
-@generatePrint
extern class InterceptorInfo extends Struct {
getter: NonNullForeign|Zero|Undefined;
setter: NonNullForeign|Zero|Undefined;
@@ -29,7 +28,6 @@ extern class InterceptorInfo extends Struct {
flags: SmiTagged<InterceptorInfoFlags>;
}
-@generatePrint
extern class AccessCheckInfo extends Struct {
callback: Foreign|Zero|Undefined;
named_interceptor: InterceptorInfo|Zero|Undefined;
@@ -50,7 +48,6 @@ bitfield struct AccessorInfoFlags extends uint31 {
initial_attributes: PropertyAttributes: 3 bit;
}
-@generatePrint
extern class AccessorInfo extends Struct {
name: Name;
flags: SmiTagged<AccessorInfoFlags>;
diff --git a/chromium/v8/src/objects/arguments.h b/chromium/v8/src/objects/arguments.h
index 372fc745e4d..661e0759f60 100644
--- a/chromium/v8/src/objects/arguments.h
+++ b/chromium/v8/src/objects/arguments.h
@@ -8,7 +8,6 @@
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -29,12 +28,10 @@ class JSArgumentsObject
// JSSloppyArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds in-object properties for "length" and "callee".
-class JSSloppyArgumentsObject : public JSArgumentsObject {
+class JSSloppyArgumentsObject
+ : public TorqueGeneratedJSSloppyArgumentsObject<JSSloppyArgumentsObject,
+ JSArgumentsObject> {
public:
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
static const int kCalleeIndex = kLengthIndex + 1;
@@ -45,13 +42,10 @@ class JSSloppyArgumentsObject : public JSArgumentsObject {
// JSStrictArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds an in-object property for "length".
-class JSStrictArgumentsObject : public JSArgumentsObject {
+class JSStrictArgumentsObject
+ : public TorqueGeneratedJSStrictArgumentsObject<JSStrictArgumentsObject,
+ JSArgumentsObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_STRICT_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
STATIC_ASSERT(kLengthIndex == JSSloppyArgumentsObject::kLengthIndex);
diff --git a/chromium/v8/src/objects/arguments.tq b/chromium/v8/src/objects/arguments.tq
index cc60e62f708..8f1385c9e1c 100644
--- a/chromium/v8/src/objects/arguments.tq
+++ b/chromium/v8/src/objects/arguments.tq
@@ -14,14 +14,12 @@ macro IsJSArgumentsObjectWithLength(implicit context: Context)(o: Object):
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSSloppyArgumentsObject extends JSArgumentsObject {
length: JSAny;
callee: JSAny;
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSStrictArgumentsObject extends JSArgumentsObject {
length: JSAny;
}
@@ -50,7 +48,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
// arguments array is not a fixed array or if key >= elements.arguments.length.
//
// Otherwise, t = elements.mapped_entries[key]. If t is the hole, then the
-// entry has been deleted fron the arguments object, and value is looked up in
+// entry has been deleted from the arguments object, and value is looked up in
// the unmapped arguments array, as described above. Otherwise, t is a Smi
// index into the context array specified at elements.context, and the return
// value is elements.context[t].
@@ -90,10 +88,7 @@ macro NewSloppyArgumentsElements<Iterator: type>(
SloppyArgumentsElements{length, context, arguments, mapped_entries: ...it};
}
-@generatePrint
-extern class AliasedArgumentsEntry extends Struct {
- aliased_context_slot: Smi;
-}
+extern class AliasedArgumentsEntry extends Struct { aliased_context_slot: Smi; }
// TODO(danno): This should be a namespace {} once supported
namespace arguments {
diff --git a/chromium/v8/src/objects/backing-store.cc b/chromium/v8/src/objects/backing-store.cc
index e72698858ae..cfe355c6064 100644
--- a/chromium/v8/src/objects/backing-store.cc
+++ b/chromium/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
+#include "src/init/vm-cage.h"
#include "src/logging/counters.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -38,24 +39,11 @@ constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif // V8_ENABLE_WEBASSEMBLY
-#if V8_TARGET_ARCH_MIPS64
-// MIPS64 has a user space of 2^40 bytes on most processors,
-// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
-#elif V8_TARGET_ARCH_RISCV64
-// RISC-V64 has a user space of 256GB on the Sv39 scheme.
-constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
-#elif V8_TARGET_ARCH_64_BIT
-constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
-#else
-constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
-#endif
-
-std::atomic<uint64_t> reserved_address_space_{0};
+std::atomic<uint32_t> next_backing_store_id_{1};
// Allocation results are reported to UMA
//
-// See wasm_memory_allocation_result in counters.h
+// See wasm_memory_allocation_result in counters-definitions.h
enum class AllocationStatus {
kSuccess, // Succeeded on the first try
@@ -67,6 +55,19 @@ enum class AllocationStatus {
kOtherFailure // Failed for an unknown reason
};
+// Attempts to allocate memory inside the virtual memory cage currently fall
+// back to allocating memory outside of the cage if necessary. Once this
+// fallback is no longer allowed/possible, these cases will become allocation
+// failures instead. To track the frequency of such events, the outcome of
+// memory allocation attempts inside the cage is reported to UMA.
+//
+// See caged_memory_allocation_outcome in counters-definitions.h
+enum class CagedMemoryAllocationOutcome {
+ kSuccess, // Allocation succeeded inside the cage
+ kOutsideCage, // Allocation failed inside the cage but succeeded outside
+ kFailure, // Allocation failed inside and outside of the cage
+};
+
base::AddressRegion GetReservedRegion(bool has_guard_regions,
void* buffer_start,
size_t byte_capacity) {
@@ -106,6 +107,29 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
static_cast<int>(status));
}
+// When the virtual memory cage is active, this function records the outcome of
+// attempts to allocate memory inside the cage which fall back to allocating
+// memory outside of the cage. Passing a value of nullptr for the result
+// indicates that the memory could not be allocated at all.
+void RecordCagedMemoryAllocationResult(Isolate* isolate, void* result) {
+ // This metric is only meaningful when the virtual memory cage is active.
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ CagedMemoryAllocationOutcome outcome;
+ if (result) {
+ bool allocation_in_cage =
+ GetProcessWideVirtualMemoryCage()->Contains(result);
+ outcome = allocation_in_cage ? CagedMemoryAllocationOutcome::kSuccess
+ : CagedMemoryAllocationOutcome::kOutsideCage;
+ } else {
+ outcome = CagedMemoryAllocationOutcome::kFailure;
+ }
+ isolate->counters()->caged_memory_allocation_outcome()->AddSample(
+ static_cast<int>(outcome));
+ }
+#endif
+}
+
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
// Double check memory is zero-initialized. Despite being DEBUG-only,
@@ -144,6 +168,34 @@ void BackingStore::Clear() {
type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}
+BackingStore::BackingStore(void* buffer_start, size_t byte_length,
+ size_t max_byte_length, size_t byte_capacity,
+ SharedFlag shared, ResizableFlag resizable,
+ bool is_wasm_memory, bool free_on_destruct,
+ bool has_guard_regions, bool custom_deleter,
+ bool empty_deleter)
+ : buffer_start_(buffer_start),
+ byte_length_(byte_length),
+ max_byte_length_(max_byte_length),
+ byte_capacity_(byte_capacity),
+ id_(next_backing_store_id_.fetch_add(1)),
+ is_shared_(shared == SharedFlag::kShared),
+ is_resizable_(resizable == ResizableFlag::kResizable),
+ is_wasm_memory_(is_wasm_memory),
+ holds_shared_ptr_to_allocator_(false),
+ free_on_destruct_(free_on_destruct),
+ has_guard_regions_(has_guard_regions),
+ globally_registered_(false),
+ custom_deleter_(custom_deleter),
+ empty_deleter_(empty_deleter) {
+ // TODO(v8:11111): RAB / GSAB - Wasm integration.
+ DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
+ DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
+ DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
+ DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
+ byte_length_ == max_byte_length_);
+}
+
BackingStore::~BackingStore() {
GlobalBackingStoreRegistry::Unregister(this);
@@ -152,6 +204,18 @@ BackingStore::~BackingStore() {
return;
}
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
+ // TODO(saelo) here and elsewhere in this file, replace with
+ // GetArrayBufferPageAllocator once the fallback to the platform page
+ // allocator is no longer allowed.
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
+ page_allocator = GetVirtualMemoryCagePageAllocator();
+ } else {
+ DCHECK(kAllowBackingStoresOutsideCage);
+ }
+#endif
+
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
// TODO(v8:11111): RAB / GSAB - Wasm integration.
@@ -176,10 +240,9 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
@@ -188,17 +251,14 @@ BackingStore::~BackingStore() {
if (is_resizable_) {
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
- size_t reservation_size =
- GetReservationSize(has_guard_regions_, byte_capacity_);
auto region =
GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
@@ -263,6 +323,8 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
+
+ DCHECK(IsValidBackingStorePointer(buffer_start));
}
auto result = new BackingStore(buffer_start, // start
@@ -318,25 +380,6 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
}
#endif // V8_ENABLE_WEBASSEMBLY
-bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
- uint64_t reservation_limit = kAddressSpaceLimit;
- uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
- while (true) {
- if (old_count > reservation_limit) return false;
- if (reservation_limit - old_count < num_bytes) return false;
- if (reserved_address_space_.compare_exchange_weak(
- old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
- return true;
- }
- }
-}
-
-void BackingStore::ReleaseReservation(uint64_t num_bytes) {
- uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
- USE(old_reserved);
- DCHECK_LE(num_bytes, old_reserved);
-}
-
std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t page_size, size_t initial_pages, size_t maximum_pages,
@@ -379,41 +422,39 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
size_t reservation_size = GetReservationSize(guards, byte_capacity);
//--------------------------------------------------------------------------
- // 1. Enforce maximum address space reservation per engine.
- //--------------------------------------------------------------------------
- auto reserve_memory_space = [&] {
- return BackingStore::ReserveAddressSpace(reservation_size);
- };
-
- if (!gc_retry(reserve_memory_space)) {
- // Crash on out-of-memory if the correctness fuzzer is running.
- if (FLAG_correctness_fuzzer_suppressions) {
- FATAL("could not allocate wasm memory backing store");
- }
- RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
- TRACE_BS("BSw:try failed to reserve address space (size %zu)\n",
- reservation_size);
- return {};
- }
-
- //--------------------------------------------------------------------------
- // 2. Allocate pages (inaccessible by default).
+ // Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
- allocation_base =
- AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
- page_size, PageAllocator::kNoAccess);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ page_allocator = GetVirtualMemoryCagePageAllocator();
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
+ if (allocation_base) return true;
+ // We currently still allow falling back to the platform page allocator if
+ // the cage page allocator fails. This will eventually be removed.
+ // TODO(chromium:1218005) once we forbid the fallback, we should have a
+ // single API, e.g. GetArrayBufferPageAllocator(), that returns the correct
+ // page allocator to use here depending on whether the virtual memory cage
+ // is enabled or not.
+ if (!kAllowBackingStoresOutsideCage) return false;
+ page_allocator = GetPlatformPageAllocator();
+#endif
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
// Page allocator could not reserve enough pages.
- BackingStore::ReleaseReservation(reservation_size);
RecordStatus(isolate, AllocationStatus::kOtherFailure);
+ RecordCagedMemoryAllocationResult(isolate, nullptr);
TRACE_BS("BSw:try failed to allocate pages\n");
return {};
}
+ DCHECK(IsValidBackingStorePointer(allocation_base));
+
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
#if V8_ENABLE_WEBASSEMBLY
@@ -423,14 +464,15 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
DCHECK(!guards);
byte* buffer_start = reinterpret_cast<byte*>(allocation_base);
#endif
+
//--------------------------------------------------------------------------
- // 3. Commit the initial pages (allow read/write).
+ // Commit the initial pages (allow read/write).
//--------------------------------------------------------------------------
size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
return committed_byte_length == 0 ||
- SetPermissions(GetPlatformPageAllocator(), buffer_start,
- committed_byte_length, PageAllocator::kReadWrite);
+ SetPermissions(page_allocator, buffer_start, committed_byte_length,
+ PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
@@ -443,6 +485,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
+ RecordCagedMemoryAllocationResult(isolate, allocation_base);
ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
@@ -708,6 +751,7 @@ BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
allocation_length, // max length
@@ -729,6 +773,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
void* allocation_base, size_t allocation_length,
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
@@ -766,7 +811,7 @@ std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) {
CHECK(!is_wasm_memory_ && !custom_deleter_ && !globally_registered_ &&
- free_on_destruct_);
+ free_on_destruct_ && !is_resizable_);
auto allocator = get_v8_api_array_buffer_allocator();
CHECK_EQ(isolate->array_buffer_allocator(), allocator);
CHECK_EQ(byte_length_, byte_capacity_);
@@ -776,6 +821,7 @@ bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) {
buffer_start_ = new_start;
byte_capacity_ = new_byte_length;
byte_length_ = new_byte_length;
+ max_byte_length_ = new_byte_length;
return true;
}
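
The hunks above delete BackingStore::ReserveAddressSpace()/ReleaseReservation() now that allocation goes through the cage page allocator. For reference, the accounting they implemented is an ordinary lock-free budget check; below is a minimal standalone sketch of the same compare-exchange pattern (illustrative names and limit, not V8 code).

#include <atomic>
#include <cstdint>

namespace sketch {

// Hypothetical cap; the real limit was kAddressSpaceLimit in backing-store.cc.
constexpr uint64_t kBudget = uint64_t{4} << 30;  // 4 GiB
std::atomic<uint64_t> reserved{0};

bool Reserve(uint64_t num_bytes) {
  uint64_t old_count = reserved.load(std::memory_order_relaxed);
  while (true) {
    // Refuse if the request would exceed the budget (written to avoid overflow).
    if (kBudget - old_count < num_bytes) return false;
    // On failure, compare_exchange_weak reloads old_count and the loop retries.
    if (reserved.compare_exchange_weak(old_count, old_count + num_bytes,
                                       std::memory_order_acq_rel)) {
      return true;
    }
  }
}

void Release(uint64_t num_bytes) {
  reserved.fetch_sub(num_bytes, std::memory_order_acq_rel);
}

}  // namespace sketch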
diff --git a/chromium/v8/src/objects/backing-store.h b/chromium/v8/src/objects/backing-store.h
index 013a97a5263..5ba95a2ba82 100644
--- a/chromium/v8/src/objects/backing-store.h
+++ b/chromium/v8/src/objects/backing-store.h
@@ -7,8 +7,8 @@
#include <memory>
+#include "include/v8-array-buffer.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/base/optional.h"
#include "src/handles/handles.h"
@@ -138,12 +138,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
#endif // V8_ENABLE_WEBASSEMBLY
- // TODO(wasm): address space limitations should be enforced in page alloc.
- // These methods enforce a limit on the total amount of address space,
- // which is used for both backing stores and wasm memory.
- static bool ReserveAddressSpace(uint64_t num_bytes);
- static void ReleaseReservation(uint64_t num_bytes);
-
// Returns the size of the external memory owned by this backing store.
// It is used for triggering GCs based on the external memory pressure.
size_t PerIsolateAccountingLength() {
@@ -163,44 +157,29 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
return byte_length();
}
+ uint32_t id() const { return id_; }
+
private:
friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t max_byte_length,
size_t byte_capacity, SharedFlag shared, ResizableFlag resizable,
bool is_wasm_memory, bool free_on_destruct,
- bool has_guard_regions, bool custom_deleter, bool empty_deleter)
- : buffer_start_(buffer_start),
- byte_length_(byte_length),
- max_byte_length_(max_byte_length),
- byte_capacity_(byte_capacity),
- is_shared_(shared == SharedFlag::kShared),
- is_resizable_(resizable == ResizableFlag::kResizable),
- is_wasm_memory_(is_wasm_memory),
- holds_shared_ptr_to_allocator_(false),
- free_on_destruct_(free_on_destruct),
- has_guard_regions_(has_guard_regions),
- globally_registered_(false),
- custom_deleter_(custom_deleter),
- empty_deleter_(empty_deleter) {
- // TODO(v8:11111): RAB / GSAB - Wasm integration.
- DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
- DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
- DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
- DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
- byte_length_ == max_byte_length_);
- }
+ bool has_guard_regions, bool custom_deleter, bool empty_deleter);
BackingStore(const BackingStore&) = delete;
BackingStore& operator=(const BackingStore&) = delete;
void SetAllocatorFromIsolate(Isolate* isolate);
void* buffer_start_ = nullptr;
- std::atomic<size_t> byte_length_{0};
+ std::atomic<size_t> byte_length_;
// Max byte length of the corresponding JSArrayBuffer(s).
- size_t max_byte_length_ = 0;
+ size_t max_byte_length_;
// Amount of the memory allocated
- size_t byte_capacity_ = 0;
-
+ size_t byte_capacity_;
+ // Unique ID of this backing store. Currently only used by DevTools, to
+ // identify stores used by several ArrayBuffers or WebAssembly memories
+ // (reported by the inspector as [[ArrayBufferData]] internal property)
+ uint32_t id_;
struct DeleterInfo {
v8::BackingStore::DeleterCallback callback;
void* data;
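
The new id_ field lets tooling recognise when several ArrayBuffers or Wasm memories share one backing store. How the patch assigns it is not shown in this hunk; one plausible scheme, sketched purely for illustration, is a process-wide atomic counter.

#include <atomic>
#include <cstdint>

// Illustrative only: monotonically increasing IDs, so an inspector can tell
// whether two buffers point at the same backing store.
inline uint32_t NextBackingStoreId() {
  static std::atomic<uint32_t> next_id{1};
  return next_id.fetch_add(1, std::memory_order_relaxed);
}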
diff --git a/chromium/v8/src/objects/bigint.cc b/chromium/v8/src/objects/bigint.cc
index 5d21adfb89c..5f323aa4ec1 100644
--- a/chromium/v8/src/objects/bigint.cc
+++ b/chromium/v8/src/objects/bigint.cc
@@ -81,50 +81,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
}
// Internal helpers.
- static MaybeHandle<MutableBigInt> BitwiseAnd(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseXor(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseOr(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
-
- static Handle<BigInt> TruncateToNBits(Isolate* isolate, int n,
- Handle<BigInt> x);
- static Handle<BigInt> TruncateAndSubFromPowerOfTwo(Isolate* isolate, int n,
- Handle<BigInt> x,
- bool result_sign);
-
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
Handle<BigIntBase> x);
- static MaybeHandle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
- Handle<BigIntBase> x,
- int result_length);
-
- enum ExtraDigitsHandling { kCopy, kSkip };
- enum SymmetricOp { kSymmetric, kNotSymmetric };
- static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric,
- const std::function<digit_t(digit_t, digit_t)>& op);
- static Handle<MutableBigInt> AbsoluteAnd(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteAndNot(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteOr(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteXor(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
// Specialized helpers for shift operations.
static MaybeHandle<BigInt> LeftShiftByAbsolute(Isolate* isolate,
@@ -145,9 +106,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
// representation.
static uint64_t GetRawBits(BigIntBase x, bool* lossless);
- // Digit arithmetic helpers.
- static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
- static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
static inline bool digit_ismax(digit_t x) {
return static_cast<digit_t>(~x) == 0;
}
@@ -406,7 +364,7 @@ MaybeHandle<BigInt> BigInt::BitwiseNot(Isolate* isolate, Handle<BigInt> x) {
MaybeHandle<MutableBigInt> result;
if (x->sign()) {
// ~(-x) == ~(~(x-1)) == x-1
- result = MutableBigInt::AbsoluteSubOne(isolate, x, x->length());
+ result = MutableBigInt::AbsoluteSubOne(isolate, x);
} else {
// ~x == -x-1 == -(x+1)
result = MutableBigInt::AbsoluteAddOne(isolate, x, true);
@@ -673,96 +631,82 @@ bool BigInt::EqualToBigInt(BigInt x, BigInt y) {
MaybeHandle<BigInt> BigInt::BitwiseAnd(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseAnd(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- if (!x->sign() && !y->sign()) {
- return AbsoluteAnd(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- int result_length = std::max(x->length(), y->length()) + 1;
- // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
- // == -(((x-1) | (y-1)) + 1)
- Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(isolate, x, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ Handle<MutableBigInt> result;
+ if (!x_sign && !y_sign) {
+ int result_length =
+ bigint::BitwiseAnd_PosPos_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseAnd_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ int result_length =
+ bigint::BitwiseAnd_NegNeg_ResultLength(x->length(), y->length());
+ if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
+ return {};
}
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- result = AbsoluteOr(isolate, result, y_1, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bigint::BitwiseAnd_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
} else {
- DCHECK(x->sign() != y->sign());
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x & (-y) == x & ~(y-1) == x &~ (y-1)
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- return AbsoluteAndNot(isolate, x, y_1);
+ if (x_sign) std::swap(x, y);
+ int result_length = bigint::BitwiseAnd_PosNeg_ResultLength(x->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseAnd_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::BitwiseXor(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseXor(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- if (!x->sign() && !y->sign()) {
- return AbsoluteXor(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- int result_length = std::max(x->length(), y->length());
- // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- return AbsoluteXor(isolate, result, y_1, *result);
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ Handle<MutableBigInt> result;
+ if (!x_sign && !y_sign) {
+ int result_length =
+ bigint::BitwiseXor_PosPos_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseXor_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ int result_length =
+ bigint::BitwiseXor_NegNeg_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseXor_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
} else {
- DCHECK(x->sign() != y->sign());
- int result_length = std::max(x->length(), y->length()) + 1;
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
- Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(isolate, y, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
+ if (x_sign) std::swap(x, y);
+ int result_length =
+ bigint::BitwiseXor_PosNeg_ResultLength(x->length(), y->length());
+ if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
+ return {};
}
- result = AbsoluteXor(isolate, result, x, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bigint::BitwiseXor_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::BitwiseOr(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseOr(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- int result_length = std::max(x->length(), y->length());
- if (!x->sign() && !y->sign()) {
- return AbsoluteOr(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
- // == -(((x-1) & (y-1)) + 1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- result = AbsoluteAnd(isolate, result, y_1, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ int result_length = bigint::BitwiseOrResultLength(x->length(), y->length());
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ if (!x_sign && !y_sign) {
+ bigint::BitwiseOr_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ bigint::BitwiseOr_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
} else {
- DCHECK(x->sign() != y->sign());
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, y, result_length).ToHandleChecked();
- result = AbsoluteAndNot(isolate, result, x, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ if (x_sign) std::swap(x, y);
+ bigint::BitwiseOr_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::Increment(Isolate* isolate, Handle<BigInt> x) {
@@ -1107,8 +1051,19 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
if (isolate->has_pending_exception()) {
return MaybeHandle<BigInt>();
} else {
+ Handle<String> str = Handle<String>::cast(obj);
+ constexpr int kMaxRenderedLength = 1000;
+ if (str->length() > kMaxRenderedLength) {
+ Factory* factory = isolate->factory();
+ Handle<String> prefix =
+ factory->NewProperSubString(str, 0, kMaxRenderedLength);
+ Handle<SeqTwoByteString> ellipsis =
+ factory->NewRawTwoByteString(1).ToHandleChecked();
+ ellipsis->SeqTwoByteStringSet(0, 0x2026);
+ str = factory->NewConsString(prefix, ellipsis).ToHandleChecked();
+ }
THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kBigIntFromObject, obj),
+ NewSyntaxError(MessageTemplate::kBigIntFromObject, str),
BigInt);
}
}
@@ -1259,16 +1214,12 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
} else {
DCHECK(result->length() == result_length);
}
- digit_t carry = 1;
- for (int i = 0; i < input_length; i++) {
- digit_t new_carry = 0;
- result->set_digit(i, digit_add(x->digit(i), carry, &new_carry));
- carry = new_carry;
- }
- if (result_length > input_length) {
- result->set_digit(input_length, carry);
+ if (input_length == 0) {
+ result->set_digit(0, 1);
+ } else if (input_length == 1 && !will_overflow) {
+ result->set_digit(0, x->digit(0) + 1);
} else {
- DCHECK_EQ(carry, 0);
+ bigint::AddOne(GetRWDigits(result), GetDigits(x));
}
result->set_sign(sign);
return result;
@@ -1278,134 +1229,16 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
Handle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
Handle<BigIntBase> x) {
DCHECK(!x->is_zero());
- // Requesting a result length identical to an existing BigInt's length
- // cannot overflow the limit.
- return AbsoluteSubOne(isolate, x, x->length()).ToHandleChecked();
-}
-
-// Like the above, but you can specify that the allocated result should have
-// length {result_length}, which must be at least as large as {x->length()}.
-MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
- Handle<BigIntBase> x,
- int result_length) {
- DCHECK(!x->is_zero());
- DCHECK(result_length >= x->length());
- Handle<MutableBigInt> result;
- if (!New(isolate, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
- }
int length = x->length();
- digit_t borrow = 1;
- for (int i = 0; i < length; i++) {
- digit_t new_borrow = 0;
- result->set_digit(i, digit_sub(x->digit(i), borrow, &new_borrow));
- borrow = new_borrow;
- }
- DCHECK_EQ(borrow, 0);
- for (int i = length; i < result_length; i++) {
- result->set_digit(i, borrow);
- }
- return result;
-}
-
-// Helper for Absolute{And,AndNot,Or,Xor}.
-// Performs the given binary {op} on digit pairs of {x} and {y}; when the
-// end of the shorter of the two is reached, {extra_digits} configures how
-// remaining digits in the longer input (if {symmetric} == kSymmetric, in
-// {x} otherwise) are handled: copied to the result or ignored.
-// If {result_storage} is non-nullptr, it will be used for the result and
-// any extra digits in it will be zeroed out, otherwise a new BigInt (with
-// the same length as the longer input) will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-// Example:
-// y: [ y2 ][ y1 ][ y0 ]
-// x: [ x3 ][ x2 ][ x1 ][ x0 ]
-// | | | |
-// (kCopy) (op) (op) (op)
-// | | | |
-// v v v v
-// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
-inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric, const std::function<digit_t(digit_t, digit_t)>& op) {
- int x_length = x->length();
- int y_length = y->length();
- int num_pairs = y_length;
- if (x_length < y_length) {
- num_pairs = x_length;
- if (symmetric == kSymmetric) {
- std::swap(x, y);
- std::swap(x_length, y_length);
- }
- }
- DCHECK(num_pairs == std::min(x_length, y_length));
- Handle<MutableBigInt> result(result_storage, isolate);
- int result_length = extra_digits == kCopy ? x_length : num_pairs;
- if (result_storage.is_null()) {
- result = New(isolate, result_length).ToHandleChecked();
+ Handle<MutableBigInt> result = New(isolate, length).ToHandleChecked();
+ if (length == 1) {
+ result->set_digit(0, x->digit(0) - 1);
} else {
- DCHECK(result_storage.length() >= result_length);
- result_length = result_storage.length();
- }
- int i = 0;
- for (; i < num_pairs; i++) {
- result->set_digit(i, op(x->digit(i), y->digit(i)));
- }
- if (extra_digits == kCopy) {
- for (; i < x_length; i++) {
- result->set_digit(i, x->digit(i));
- }
- }
- for (; i < result_length; i++) {
- result->set_digit(i, 0);
+ bigint::SubtractOne(GetRWDigits(result), GetDigits(x));
}
return result;
}
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kSkip, kSymmetric,
- [](digit_t a, digit_t b) { return a & b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kNotSymmetric,
- [](digit_t a, digit_t b) { return a & ~b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
- [](digit_t a, digit_t b) { return a | b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
- [](digit_t a, digit_t b) { return a ^ b; });
-}
-
MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> x,
Handle<BigIntBase> y) {
@@ -1649,160 +1482,41 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
}
Handle<BigInt> BigInt::AsIntN(Isolate* isolate, uint64_t n, Handle<BigInt> x) {
- if (x->is_zero()) return x;
+ if (x->is_zero() || n > kMaxLengthBits) return x;
if (n == 0) return MutableBigInt::Zero(isolate);
- uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
- uint64_t x_length = static_cast<uint64_t>(x->length());
- // If {x} has less than {n} bits, return it directly.
- if (x_length < needed_length) return x;
- DCHECK_LE(needed_length, kMaxInt);
- digit_t top_digit = x->digit(static_cast<int>(needed_length) - 1);
- digit_t compare_digit = static_cast<digit_t>(1) << ((n - 1) % kDigitBits);
- if (x_length == needed_length && top_digit < compare_digit) return x;
- // Otherwise we have to truncate (which is a no-op in the special case
- // of x == -2^(n-1)), and determine the right sign. We also might have
- // to subtract from 2^n to simulate having two's complement representation.
- // In most cases, the result's sign is x->sign() xor "(n-1)th bit present".
- // The only exception is when x is negative, has the (n-1)th bit, and all
- // its bits below (n-1) are zero. In that case, the result is the minimum
- // n-bit integer (example: asIntN(3, -12n) => -4n).
- bool has_bit = (top_digit & compare_digit) == compare_digit;
- DCHECK_LE(n, kMaxInt);
- int N = static_cast<int>(n);
- if (!has_bit) {
- return MutableBigInt::TruncateToNBits(isolate, N, x);
- }
- if (!x->sign()) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, true);
- }
- // Negative numbers must subtract from 2^n, except for the special case
- // described above.
- if ((top_digit & (compare_digit - 1)) == 0) {
- for (int i = static_cast<int>(needed_length) - 2; i >= 0; i--) {
- if (x->digit(i) != 0) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x,
- false);
- }
- }
- // Truncation is no-op if x == -2^(n-1).
- if (x_length == needed_length && top_digit == compare_digit) return x;
- return MutableBigInt::TruncateToNBits(isolate, N, x);
- }
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, false);
+ int needed_length =
+ bigint::AsIntNResultLength(GetDigits(x), x->sign(), static_cast<int>(n));
+ if (needed_length == -1) return x;
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, needed_length).ToHandleChecked();
+ bool negative = bigint::AsIntN(GetRWDigits(result), GetDigits(x), x->sign(),
+ static_cast<int>(n));
+ result->set_sign(negative);
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
Handle<BigInt> x) {
if (x->is_zero()) return x;
if (n == 0) return MutableBigInt::Zero(isolate);
- // If {x} is negative, simulate two's complement representation.
+ Handle<MutableBigInt> result;
if (x->sign()) {
if (n > kMaxLengthBits) {
return ThrowBigIntTooBig<BigInt>(isolate);
}
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(
- isolate, static_cast<int>(n), x, false);
- }
- // If {x} is positive and has up to {n} bits, return it directly.
- if (n >= kMaxLengthBits) return x;
- STATIC_ASSERT(kMaxLengthBits < kMaxInt - kDigitBits);
- int needed_length = static_cast<int>((n + kDigitBits - 1) / kDigitBits);
- if (x->length() < needed_length) return x;
- int bits_in_top_digit = n % kDigitBits;
- if (x->length() == needed_length) {
- if (bits_in_top_digit == 0) return x;
- digit_t top_digit = x->digit(needed_length - 1);
- if ((top_digit >> bits_in_top_digit) == 0) return x;
- }
- // Otherwise, truncate.
- DCHECK_LE(n, kMaxInt);
- return MutableBigInt::TruncateToNBits(isolate, static_cast<int>(n), x);
-}
-
-Handle<BigInt> MutableBigInt::TruncateToNBits(Isolate* isolate, int n,
- Handle<BigInt> x) {
- // Only call this when there's something to do.
- DCHECK_NE(n, 0);
- DCHECK_GT(x->length(), n / kDigitBits);
-
- int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
- DCHECK_LE(needed_digits, x->length());
- Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
-
- // Copy all digits except the MSD.
- int last = needed_digits - 1;
- for (int i = 0; i < last; i++) {
- result->set_digit(i, x->digit(i));
- }
-
- // The MSD might contain extra bits that we don't want.
- digit_t msd = x->digit(last);
- if (n % kDigitBits != 0) {
- int drop = kDigitBits - (n % kDigitBits);
- msd = (msd << drop) >> drop;
- }
- result->set_digit(last, msd);
- result->set_sign(x->sign());
- return MakeImmutable(result);
-}
-
-// Subtracts the least significant n bits of abs(x) from 2^n.
-Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(Isolate* isolate,
- int n,
- Handle<BigInt> x,
- bool result_sign) {
- DCHECK_NE(n, 0);
- DCHECK_LE(n, kMaxLengthBits);
-
- int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
- DCHECK_LE(needed_digits, kMaxLength); // Follows from n <= kMaxLengthBits.
- Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
-
- // Process all digits except the MSD.
- int i = 0;
- int last = needed_digits - 1;
- int x_length = x->length();
- digit_t borrow = 0;
- // Take digits from {x} unless its length is exhausted.
- int limit = std::min(last, x_length);
- for (; i < limit; i++) {
- digit_t new_borrow = 0;
- digit_t difference = digit_sub(0, x->digit(i), &new_borrow);
- difference = digit_sub(difference, borrow, &new_borrow);
- result->set_digit(i, difference);
- borrow = new_borrow;
- }
- // Then simulate leading zeroes in {x} as needed.
- for (; i < last; i++) {
- digit_t new_borrow = 0;
- digit_t difference = digit_sub(0, borrow, &new_borrow);
- result->set_digit(i, difference);
- borrow = new_borrow;
- }
-
- // The MSD might contain extra bits that we don't want.
- digit_t msd = last < x_length ? x->digit(last) : 0;
- int msd_bits_consumed = n % kDigitBits;
- digit_t result_msd;
- if (msd_bits_consumed == 0) {
- digit_t new_borrow = 0;
- result_msd = digit_sub(0, msd, &new_borrow);
- result_msd = digit_sub(result_msd, borrow, &new_borrow);
+ int result_length = bigint::AsUintN_Neg_ResultLength(static_cast<int>(n));
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::AsUintN_Neg(GetRWDigits(result), GetDigits(x), static_cast<int>(n));
} else {
- int drop = kDigitBits - msd_bits_consumed;
- msd = (msd << drop) >> drop;
- digit_t minuend_msd = static_cast<digit_t>(1) << (kDigitBits - drop);
- digit_t new_borrow = 0;
- result_msd = digit_sub(minuend_msd, msd, &new_borrow);
- result_msd = digit_sub(result_msd, borrow, &new_borrow);
- DCHECK_EQ(new_borrow, 0); // result < 2^n.
- // If all subtracted bits were zero, we have to get rid of the
- // materialized minuend_msd again.
- result_msd &= (minuend_msd - 1);
- }
- result->set_digit(last, result_msd);
- result->set_sign(result_sign);
- return MakeImmutable(result);
+ if (n >= kMaxLengthBits) return x;
+ int result_length =
+ bigint::AsUintN_Pos_ResultLength(GetDigits(x), static_cast<int>(n));
+ if (result_length < 0) return x;
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::AsUintN_Pos(GetRWDigits(result), GetDigits(x), static_cast<int>(n));
+ }
+ DCHECK(!result->sign());
+ return MutableBigInt::MakeImmutable(result);
}
Handle<BigInt> BigInt::FromInt64(Isolate* isolate, int64_t n) {
@@ -1928,49 +1642,6 @@ uint64_t BigInt::AsUint64(bool* lossless) {
return result;
}
-// Digit arithmetic helpers.
-
-#if V8_TARGET_ARCH_32_BIT
-#define HAVE_TWODIGIT_T 1
-using twodigit_t = uint64_t;
-#elif defined(__SIZEOF_INT128__)
-// Both Clang and GCC support this on x64.
-#define HAVE_TWODIGIT_T 1
-using twodigit_t = __uint128_t;
-#endif
-
-// {carry} must point to an initialized digit_t and will either be incremented
-// by one or left alone.
-inline BigInt::digit_t MutableBigInt::digit_add(digit_t a, digit_t b,
- digit_t* carry) {
-#if HAVE_TWODIGIT_T
- twodigit_t result = static_cast<twodigit_t>(a) + static_cast<twodigit_t>(b);
- *carry += result >> kDigitBits;
- return static_cast<digit_t>(result);
-#else
- digit_t result = a + b;
- if (result < a) *carry += 1;
- return result;
-#endif
-}
-
-// {borrow} must point to an initialized digit_t and will either be incremented
-// by one or left alone.
-inline BigInt::digit_t MutableBigInt::digit_sub(digit_t a, digit_t b,
- digit_t* borrow) {
-#if HAVE_TWODIGIT_T
- twodigit_t result = static_cast<twodigit_t>(a) - static_cast<twodigit_t>(b);
- *borrow += (result >> kDigitBits) & 1;
- return static_cast<digit_t>(result);
-#else
- digit_t result = a - b;
- if (result > a) *borrow += 1;
- return static_cast<digit_t>(result);
-#endif
-}
-
-#undef HAVE_TWODIGIT_T
-
void MutableBigInt::set_64_bits(uint64_t bits) {
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
if (kDigitBits == 64) {
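
The rewritten bitwise operations delegate to the src/bigint/ helpers, but they still rest on the two's-complement identities that the deleted comments spelled out, for example (-x) & (-y) == -(((x-1) | (y-1)) + 1). Below is a small standalone check, on machine integers rather than BigInt digits, that the identity holds; it is not V8 code.

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t x = 1; x <= 64; ++x) {
    for (int64_t y = 1; y <= 64; ++y) {
      // Two's complement represents -v as ~(v - 1), so
      // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1).
      int64_t lhs = (-x) & (-y);
      int64_t rhs = -(((x - 1) | (y - 1)) + 1);
      assert(lhs == rhs);
    }
  }
  return 0;
}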
diff --git a/chromium/v8/src/objects/bigint.tq b/chromium/v8/src/objects/bigint.tq
index 2d8275b2d52..3b8a4e7cc41 100644
--- a/chromium/v8/src/objects/bigint.tq
+++ b/chromium/v8/src/objects/bigint.tq
@@ -9,12 +9,11 @@ extern class BigIntBase extends PrimitiveHeapObject
type BigInt extends BigIntBase;
-@noVerifier
@hasSameInstanceTypeAsParent
@doNotGenerateCast
extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>';
Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
- assert(bigint::IsCanonicalized(i));
+ dcheck(bigint::IsCanonicalized(i));
return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
}
diff --git a/chromium/v8/src/objects/cell-inl.h b/chromium/v8/src/objects/cell-inl.h
index 5c809b8172a..d47b49504ec 100644
--- a/chromium/v8/src/objects/cell-inl.h
+++ b/chromium/v8/src/objects/cell-inl.h
@@ -20,6 +20,10 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
+DEF_RELAXED_GETTER(Cell, value, Object) {
+ return TaggedField<Object, kValueOffset>::Relaxed_Load(cage_base, *this);
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/objects/cell.h b/chromium/v8/src/objects/cell.h
index 4076dea0e6f..56c1016bd5f 100644
--- a/chromium/v8/src/objects/cell.h
+++ b/chromium/v8/src/objects/cell.h
@@ -19,6 +19,9 @@ class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
public:
inline Address ValueAddress() { return address() + kValueOffset; }
+ using TorqueGeneratedCell::value;
+ DECL_RELAXED_GETTER(value, Object)
+
using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kSize, kSize>;
TQ_OBJECT_CONSTRUCTORS(Cell)
diff --git a/chromium/v8/src/objects/cell.tq b/chromium/v8/src/objects/cell.tq
index c318d400654..68172717606 100644
--- a/chromium/v8/src/objects/cell.tq
+++ b/chromium/v8/src/objects/cell.tq
@@ -2,7 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
-extern class Cell extends HeapObject {
- value: Object;
-}
+extern class Cell extends HeapObject { value: Object; }
diff --git a/chromium/v8/src/objects/code-inl.h b/chromium/v8/src/objects/code-inl.h
index cae02edc23b..4cbb1595c96 100644
--- a/chromium/v8/src/objects/code-inl.h
+++ b/chromium/v8/src/objects/code-inl.h
@@ -9,6 +9,7 @@
#include "src/baseline/bytecode-offset-iterator.h"
#include "src/codegen/code-desc.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -205,15 +206,26 @@ CODE_ACCESSORS_CHECKED(relocation_info_or_undefined, HeapObject,
kRelocationInfoOffset,
value.IsUndefined() || value.IsByteArray())
-CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-#define IS_BASELINE() (kind() == CodeKind::BASELINE)
+ACCESSORS_CHECKED2(Code, deoptimization_data, FixedArray,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_or_interpreter_data, HeapObject,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
- !IS_BASELINE(),
- !IS_BASELINE() && !ObjectInYoungGeneration(value))
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
- IS_BASELINE(),
- IS_BASELINE() && !ObjectInYoungGeneration(value))
-#undef IS_BASELINE
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
// Concurrent marker needs to access kind specific flags in code data container.
RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
kCodeDataContainerOffset)
@@ -268,7 +280,8 @@ inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kDeoptimizationDataOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kDeoptimizationDataOrInterpreterDataOffset,
+ Smi::FromInt(0));
WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
}
@@ -381,7 +394,9 @@ int Code::MetadataSize() const {
int Code::SizeIncludingMetadata() const {
int size = CodeSize();
size += relocation_info().Size();
- size += deoptimization_data().Size();
+ if (kind() != CodeKind::BASELINE) {
+ size += deoptimization_data().Size();
+ }
return size;
}
@@ -553,44 +568,47 @@ inline bool Code::is_turbofanned() const {
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsPromiseRejectionField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsExceptionCaughtField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_off_heap_trampoline() const {
@@ -642,7 +660,8 @@ int Code::stack_slots() const {
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return MarkedForDeoptimizationField::decode(flags);
}
@@ -650,14 +669,15 @@ void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
int Code::deoptimization_count() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
int count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
return count;
@@ -666,17 +686,18 @@ int Code::deoptimization_count() const {
void Code::increment_deoptimization_count() {
DCHECK(CodeKindCanDeoptimize(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t flags = container.kind_specific_flags();
+ int32_t flags = container.kind_specific_flags(kRelaxedLoad);
int32_t count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
CHECK_LE(count + 1, DeoptCountField::kMax);
int32_t updated = DeoptCountField::update(flags, count + 1);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return EmbeddedObjectsClearedField::decode(flags);
}
@@ -684,14 +705,15 @@ void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::deopt_already_counted() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return DeoptAlreadyCountedField::decode(flags);
}
@@ -699,9 +721,9 @@ void Code::set_deopt_already_counted(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::is_optimized_code() const {
@@ -800,8 +822,8 @@ bool Code::IsExecutable() {
// concurrent marker.
STATIC_ASSERT(FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) ==
kInt32Size);
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
- kKindSpecificFlagsOffset)
+RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
+ kKindSpecificFlagsOffset)
ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
V8_EXTERNAL_CODE_SPACE_BOOL)
RELAXED_ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
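
kind_specific_flags now takes explicit kRelaxedLoad/kRelaxedStore tags instead of the implicit-tag accessors. The tag-dispatch idea, reduced to a minimal sketch outside V8's accessor macros (names here are illustrative, not the real declarations):

#include <atomic>
#include <cstdint>

// Tag types make the memory order explicit at every call site, which is what
// the kRelaxedLoad/kRelaxedStore arguments in the hunks above do.
struct RelaxedLoadTag {};
struct RelaxedStoreTag {};
inline constexpr RelaxedLoadTag kRelaxedLoadTag{};
inline constexpr RelaxedStoreTag kRelaxedStoreTag{};

class FlagsHolder {
 public:
  int32_t flags(RelaxedLoadTag) const {
    return flags_.load(std::memory_order_relaxed);
  }
  void set_flags(int32_t value, RelaxedStoreTag) {
    flags_.store(value, std::memory_order_relaxed);
  }

 private:
  std::atomic<int32_t> flags_{0};
};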
diff --git a/chromium/v8/src/objects/code.cc b/chromium/v8/src/objects/code.cc
index e2a4528d0d0..b3f9953be15 100644
--- a/chromium/v8/src/objects/code.cc
+++ b/chromium/v8/src/objects/code.cc
@@ -333,7 +333,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
@@ -349,10 +349,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
}
return false;
}
+ return true;
#else
#error Unsupported architecture.
#endif
- return true;
}
bool Code::Inlines(SharedFunctionInfo sfi) {
@@ -775,7 +775,7 @@ void DependentCode::SetDependentCode(Handle<HeapObject> object,
void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
Handle<HeapObject> object,
DependencyGroup group) {
- if (V8_UNLIKELY(FLAG_trace_code_dependencies)) {
+ if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
<< "] on [" << object << "] in group ["
<< DependencyGroupName(group) << "]\n";
diff --git a/chromium/v8/src/objects/code.h b/chromium/v8/src/objects/code.h
index 2d6fc3e9836..2b2c874d865 100644
--- a/chromium/v8/src/objects/code.h
+++ b/chromium/v8/src/objects/code.h
@@ -43,7 +43,7 @@ class CodeDataContainer : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(next_code_link, Object)
- DECL_INT_ACCESSORS(kind_specific_flags)
+ DECL_RELAXED_INT32_ACCESSORS(kind_specific_flags)
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -279,8 +279,12 @@ class Code : public HeapObject {
// This function should be called only from GC.
void ClearEmbeddedObjects(Heap* heap);
- // [deoptimization_data]: Array containing data for deopt.
+ // [deoptimization_data]: Array containing data for deopt for non-baseline
+ // code.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [bytecode_or_interpreter_data]: BytecodeArray or InterpreterData for
+ // baseline code.
+ DECL_ACCESSORS(bytecode_or_interpreter_data, HeapObject)
// [source_position_table]: ByteArray for the source positions table for
// non-baseline code.
@@ -511,7 +515,7 @@ class Code : public HeapObject {
// Layout description.
#define CODE_FIELDS(V) \
V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kDeoptimizationDataOrInterpreterDataOffset, kTaggedSize) \
V(kPositionTableOffset, kTaggedSize) \
V(kCodeDataContainerOffset, kTaggedSize) \
/* Data or code not directly visited by GC directly starts here. */ \
@@ -544,8 +548,10 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 24;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kHeaderPaddingSize = 24;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 56;
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_IA32
@@ -647,6 +653,10 @@ class Code::OptimizedCodeIterator {
inline CodeT ToCodeT(Code code);
inline Code FromCodeT(CodeT code);
inline Code FromCodeT(CodeT code, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, AcquireLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, AcquireLoadTag);
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code);
class AbstractCode : public HeapObject {
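
kDeoptimizationDataOrInterpreterDataOffset now backs two logical fields, selected by the code kind and guarded by ACCESSORS_CHECKED2. Stripped of the macros, the pattern is a discriminated slot with asserting accessors; a rough standalone sketch (not the real Code layout):

#include <cassert>

enum class CodeKind { BASELINE, TURBOFAN };

class CodeLike {
 public:
  explicit CodeLike(CodeKind kind) : kind_(kind) {}

  // Non-baseline code stores deoptimization data in the shared slot.
  void set_deoptimization_data(void* data) {
    assert(kind_ != CodeKind::BASELINE);
    slot_ = data;
  }
  void* deoptimization_data() const {
    assert(kind_ != CodeKind::BASELINE);
    return slot_;
  }

  // Baseline code reuses the same slot for bytecode/interpreter data.
  void set_bytecode_or_interpreter_data(void* data) {
    assert(kind_ == CodeKind::BASELINE);
    slot_ = data;
  }
  void* bytecode_or_interpreter_data() const {
    assert(kind_ == CodeKind::BASELINE);
    return slot_;
  }

 private:
  CodeKind kind_;
  void* slot_ = nullptr;  // one offset shared by both logical fields
};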
diff --git a/chromium/v8/src/objects/contexts.h b/chromium/v8/src/objects/contexts.h
index 7fae0c9e0da..d2df5395c11 100644
--- a/chromium/v8/src/objects/contexts.h
+++ b/chromium/v8/src/objects/contexts.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_CONTEXTS_H_
#define V8_OBJECTS_CONTEXTS_H_
+#include "include/v8-promise.h"
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/osr-optimized-code-cache.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -43,13 +43,8 @@ enum ContextLookupFlags {
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \
async_module_evaluate_internal) \
- V(OBJECT_CREATE, JSFunction, object_create) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(FUNCTION_PROTOTYPE_APPLY_INDEX, JSFunction, function_prototype_apply)
diff --git a/chromium/v8/src/objects/contexts.tq b/chromium/v8/src/objects/contexts.tq
index 83c43cc7f57..f7c0b875ef2 100644
--- a/chromium/v8/src/objects/contexts.tq
+++ b/chromium/v8/src/objects/contexts.tq
@@ -61,11 +61,12 @@ type Slot<Container : type extends Context, T : type extends Object> extends
// slot has the right type already.
macro InitContextSlot<
ArgumentContext: type, AnnotatedContext: type, T: type, U: type>(
- context: ArgumentContext, index: Slot<AnnotatedContext, T>, value: U) {
+ context: ArgumentContext, index: Slot<AnnotatedContext, T>,
+ value: U): void {
// Make sure the arguments have the right type.
const context: AnnotatedContext = context;
const value: T = value;
- assert(TaggedEqual(context.elements[index], kInitialContextSlotValue));
+ dcheck(TaggedEqual(context.elements[index], kInitialContextSlotValue));
context.elements[index] = value;
}
@@ -179,17 +180,17 @@ macro LoadContextElement(c: Context, i: constexpr int32): Object {
}
@export
-macro StoreContextElement(c: Context, i: intptr, o: Object) {
+macro StoreContextElement(c: Context, i: intptr, o: Object): void {
c.elements[i] = o;
}
@export
-macro StoreContextElement(c: Context, i: Smi, o: Object) {
+macro StoreContextElement(c: Context, i: Smi, o: Object): void {
c.elements[i] = o;
}
@export
-macro StoreContextElement(c: Context, i: constexpr int32, o: Object) {
+macro StoreContextElement(c: Context, i: constexpr int32, o: Object): void {
c.elements[i] = o;
}
diff --git a/chromium/v8/src/objects/data-handler.h b/chromium/v8/src/objects/data-handler.h
index 9310824af04..6461e481f38 100644
--- a/chromium/v8/src/objects/data-handler.h
+++ b/chromium/v8/src/objects/data-handler.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/chromium/v8/src/objects/data-handler.tq b/chromium/v8/src/objects/data-handler.tq
index 78bd31e5365..46af3263481 100644
--- a/chromium/v8/src/objects/data-handler.tq
+++ b/chromium/v8/src/objects/data-handler.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// This class does not use the generated verifier, so if you change anything
+// here, please also update DataHandlerVerify in objects-debug.cc.
@abstract
extern class DataHandler extends Struct {
// [smi_handler]: A Smi which encodes a handler or Code object (we still
@@ -15,7 +17,7 @@ extern class DataHandler extends Struct {
validity_cell: Smi|Cell;
// Space for the following fields may or may not be allocated.
- @noVerifier data1: MaybeObject;
- @noVerifier data2: MaybeObject;
- @noVerifier data3: MaybeObject;
+ data1: MaybeObject;
+ data2: MaybeObject;
+ data3: MaybeObject;
}
diff --git a/chromium/v8/src/objects/debug-objects.tq b/chromium/v8/src/objects/debug-objects.tq
index 16e5cb43c67..d00b4abf4cc 100644
--- a/chromium/v8/src/objects/debug-objects.tq
+++ b/chromium/v8/src/objects/debug-objects.tq
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class BreakPoint extends Struct {
id: Smi;
condition: String;
}
-@generatePrint
extern class BreakPointInfo extends Struct {
// The position in the source for the break position.
source_position: Smi;
@@ -32,7 +30,6 @@ bitfield struct DebuggerHints extends uint31 {
debugging_id: int32: 20 bit;
}
-@generatePrint
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
// Bit field containing various information collected for debugging.
diff --git a/chromium/v8/src/objects/descriptor-array-inl.h b/chromium/v8/src/objects/descriptor-array-inl.h
index 9bb01ffc4db..387ae8d2764 100644
--- a/chromium/v8/src/objects/descriptor-array-inl.h
+++ b/chromium/v8/src/objects/descriptor-array-inl.h
@@ -189,7 +189,7 @@ void DescriptorArray::SetDetails(InternalIndex descriptor_number,
}
int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), PropertyLocation::kField);
return GetDetails(descriptor_number).field_index();
}
@@ -200,7 +200,7 @@ FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
FieldType DescriptorArray::GetFieldType(PtrComprCageBase cage_base,
InternalIndex descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), PropertyLocation::kField);
MaybeObject wrapped_type = GetValue(cage_base, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
diff --git a/chromium/v8/src/objects/descriptor-array.tq b/chromium/v8/src/objects/descriptor-array.tq
index a97722d4b97..9e15812cb29 100644
--- a/chromium/v8/src/objects/descriptor-array.tq
+++ b/chromium/v8/src/objects/descriptor-array.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class EnumCache extends Struct {
keys: FixedArray;
indices: FixedArray;
diff --git a/chromium/v8/src/objects/elements-kind.h b/chromium/v8/src/objects/elements-kind.h
index 0d546aba8dc..4599710f8b5 100644
--- a/chromium/v8/src/objects/elements-kind.h
+++ b/chromium/v8/src/objects/elements-kind.h
@@ -58,6 +58,23 @@ namespace internal {
V(BigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t) \
V(BigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t)
+// Like RAB_GSAB_TYPED_ARRAYS but has an additional parameter for
+// the corresponding non-RAB/GSAB ElementsKind.

+#define RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(V) \
+ V(RabGsabUint8, rab_gsab_uint8, RAB_GSAB_UINT8, uint8_t, UINT8) \
+ V(RabGsabInt8, rab_gsab_int8, RAB_GSAB_INT8, int8_t, INT8) \
+ V(RabGsabUint16, rab_gsab_uint16, RAB_GSAB_UINT16, uint16_t, UINT16) \
+ V(RabGsabInt16, rab_gsab_int16, RAB_GSAB_INT16, int16_t, INT16) \
+ V(RabGsabUint32, rab_gsab_uint32, RAB_GSAB_UINT32, uint32_t, UINT32) \
+ V(RabGsabInt32, rab_gsab_int32, RAB_GSAB_INT32, int32_t, INT32) \
+ V(RabGsabFloat32, rab_gsab_float32, RAB_GSAB_FLOAT32, float, FLOAT32) \
+ V(RabGsabFloat64, rab_gsab_float64, RAB_GSAB_FLOAT64, double, FLOAT64) \
+ V(RabGsabUint8Clamped, rab_gsab_uint8_clamped, RAB_GSAB_UINT8_CLAMPED, \
+ uint8_t, UINT8_CLAMPED) \
+ V(RabGsabBigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t, \
+ BIGUINT64) \
+ V(RabGsabBigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t, BIGINT64)
+
enum ElementsKind : uint8_t {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
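
The new RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND list is an X-macro with one extra column so call sites (such as the CopyBetweenBackingStores switch later in this patch) can map each RAB/GSAB kind to its plain counterpart. A toy example of the same technique, with made-up names: each use site defines a per-row macro, expands the list, and undefines it again.

#include <cstdint>

#define DEMO_KINDS(V)                    \
  V(RabGsabUint8, RAB_GSAB_UINT8, UINT8) \
  V(RabGsabInt8, RAB_GSAB_INT8, INT8)

enum class Kind : uint8_t {
  UINT8,
  INT8,
#define DEFINE_KIND(Name, KIND, BASE_KIND) KIND,
  DEMO_KINDS(DEFINE_KIND)
#undef DEFINE_KIND
};

// Map a RAB/GSAB kind back to its non-RAB/GSAB counterpart using the extra column.
constexpr Kind ToBaseKind(Kind kind) {
  switch (kind) {
#define KIND_CASE(Name, KIND, BASE_KIND) \
  case Kind::KIND:                       \
    return Kind::BASE_KIND;
    DEMO_KINDS(KIND_CASE)
#undef KIND_CASE
    default:
      return kind;
  }
}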
diff --git a/chromium/v8/src/objects/elements.cc b/chromium/v8/src/objects/elements.cc
index 4eedf3d6c02..85386ba6395 100644
--- a/chromium/v8/src/objects/elements.cc
+++ b/chromium/v8/src/objects/elements.cc
@@ -3327,8 +3327,6 @@ class TypedElementsAccessor
DisallowGarbageCollection no_gc;
JSTypedArray typed_array = JSTypedArray::cast(*receiver);
- // TODO(caitp): return Just(false) here when implementing strict throwing on
- // detached views.
if (typed_array.WasDetached()) {
return Just(value->IsUndefined(isolate) && length > start_from);
}
@@ -3541,7 +3539,7 @@ class TypedElementsAccessor
CHECK(!source.WasDetached());
CHECK(!destination.WasDetached());
DCHECK_LE(start, end);
- DCHECK_LE(end, source.length());
+ DCHECK_LE(end, source.GetLength());
size_t count = end - start;
DCHECK_LE(count, destination.length());
ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
@@ -3559,6 +3557,16 @@ class TypedElementsAccessor
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, NON_RAB_GSAB_TYPE) \
+ case TYPE##_ELEMENTS: { \
+ ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
+ CopyBetweenBackingStores<NON_RAB_GSAB_TYPE##_ELEMENTS, ctype>( \
+ source_data, dest_data, count, is_shared); \
+ break; \
+ }
+ RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
break;
diff --git a/chromium/v8/src/objects/embedder-data-array-inl.h b/chromium/v8/src/objects/embedder-data-array-inl.h
index 6eb10762870..d32fcfcd7b8 100644
--- a/chromium/v8/src/objects/embedder-data-array-inl.h
+++ b/chromium/v8/src/objects/embedder-data-array-inl.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_INL_H_
#include "src/objects/embedder-data-array.h"
-
+#include "src/objects/heap-object-inl.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/maybe-object-inl.h"
diff --git a/chromium/v8/src/objects/feedback-cell-inl.h b/chromium/v8/src/objects/feedback-cell-inl.h
index 84257e544c4..46d92b8447c 100644
--- a/chromium/v8/src/objects/feedback-cell-inl.h
+++ b/chromium/v8/src/objects/feedback-cell-inl.h
@@ -40,7 +40,7 @@ void FeedbackCell::reset_feedback_vector(
CHECK(value().IsFeedbackVector());
ClosureFeedbackCellArray closure_feedback_cell_array =
FeedbackVector::cast(value()).closure_feedback_cell_array();
- set_value(closure_feedback_cell_array);
+ set_value(closure_feedback_cell_array, kReleaseStore);
if (gc_notify_updated_slot) {
(*gc_notify_updated_slot)(*this, RawField(FeedbackCell::kValueOffset),
closure_feedback_cell_array);
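
The kReleaseStore tag added above selects the release-ordered variant of the field write, so the fully initialized ClosureFeedbackCellArray is visible to concurrent readers that acquire-load the slot. As a generic illustration of that ordering (plain std::atomic, not V8's tagged-field accessors):

#include <atomic>
#include <cassert>

struct DemoCell {
  std::atomic<int*> value{nullptr};

  // Analogue of set_value(..., kReleaseStore): everything written before this
  // store is visible to a thread that acquire-loads the new pointer.
  void set_value_release(int* v) { value.store(v, std::memory_order_release); }
  int* value_acquire() const { return value.load(std::memory_order_acquire); }
};

int main() {
  static int payload = 42;  // stands in for the feedback-cell payload
  DemoCell cell;
  cell.set_value_release(&payload);
  assert(*cell.value_acquire() == 42);
  return 0;
}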
diff --git a/chromium/v8/src/objects/feedback-vector.cc b/chromium/v8/src/objects/feedback-vector.cc
index f50121aa611..bc562b29a70 100644
--- a/chromium/v8/src/objects/feedback-vector.cc
+++ b/chromium/v8/src/objects/feedback-vector.cc
@@ -390,12 +390,13 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code,
FeedbackCell feedback_cell) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- // We should only set optimized code only when there is no valid optimized
- // code or we are tiering up.
+ // We should set optimized code only when there is no valid optimized code or
+ // we are tiering up.
DCHECK(!vector->has_optimized_code() ||
vector->optimized_code().marked_for_deoptimization() ||
(vector->optimized_code().kind() == CodeKind::TURBOPROP &&
- code->kind() == CodeKind::TURBOFAN));
+ code->kind() == CodeKind::TURBOFAN) ||
+ FLAG_stress_concurrent_inlining_attach_code);
// TODO(mythria): We could see a CompileOptimized marker here either from
// tests that use %OptimizeFunctionOnNextCall, --always-opt or because we
// re-mark the function for non-concurrent optimization after an OSR. We
diff --git a/chromium/v8/src/objects/fixed-array.tq b/chromium/v8/src/objects/fixed-array.tq
index 3daa5bad490..c769b6b90d8 100644
--- a/chromium/v8/src/objects/fixed-array.tq
+++ b/chromium/v8/src/objects/fixed-array.tq
@@ -86,10 +86,11 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
extern operator '.floats[]' macro LoadFixedDoubleArrayElement(
FixedDoubleArray, intptr): float64;
operator '[]=' macro StoreFixedDoubleArrayDirect(
- a: FixedDoubleArray, i: Smi, v: Number) {
+ a: FixedDoubleArray, i: Smi, v: Number): void {
a.floats[i] = Convert<float64_or_hole>(Convert<float64>(v));
}
-operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
+operator '[]=' macro StoreFixedArrayDirect(
+ a: FixedArray, i: Smi, v: Object): void {
a.objects[i] = v;
}
diff --git a/chromium/v8/src/objects/heap-object.h b/chromium/v8/src/objects/heap-object.h
index 94fdf7eeb15..016e0c77d33 100644
--- a/chromium/v8/src/objects/heap-object.h
+++ b/chromium/v8/src/objects/heap-object.h
@@ -152,6 +152,7 @@ class HeapObject : public Object {
// during marking GC.
inline ObjectSlot RawField(int byte_offset) const;
inline MaybeObjectSlot RawMaybeWeakField(int byte_offset) const;
+ inline CodeObjectSlot RawCodeField(int byte_offset) const;
DECL_CAST(HeapObject)
diff --git a/chromium/v8/src/objects/instance-type.h b/chromium/v8/src/objects/instance-type.h
index f7cdd28c058..71f349deadd 100644
--- a/chromium/v8/src/objects/instance-type.h
+++ b/chromium/v8/src/objects/instance-type.h
@@ -128,6 +128,9 @@ enum InstanceType : uint16_t {
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
+ // Callable JS Functions are all JS Functions except class constructors.
+ FIRST_CALLABLE_JS_FUNCTION_TYPE = FIRST_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE = JS_CLASS_CONSTRUCTOR_TYPE - 1,
// Boundary for testing JSReceivers that need special property lookup handling
LAST_SPECIAL_RECEIVER_TYPE = LAST_JS_SPECIAL_OBJECT_TYPE,
// Boundary case for testing JSReceivers that may have elements while having
@@ -139,6 +142,12 @@ enum InstanceType : uint16_t {
FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE,
LAST_TYPE = LAST_HEAP_OBJECT_TYPE,
BIGINT_TYPE = BIG_INT_BASE_TYPE,
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+ CODET_TYPE = CODE_DATA_CONTAINER_TYPE,
+#else
+ CODET_TYPE = CODE_TYPE,
+#endif
};
// This constant is defined outside of the InstanceType enum because the
@@ -165,6 +174,13 @@ STRING_TYPE_LIST(CHECK_STRING_RANGE)
TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_NONSTRING_RANGE)
#undef CHECK_NONSTRING_RANGE
+// classConstructor type has to be the last one in the JS Function type range.
+STATIC_ASSERT(JS_CLASS_CONSTRUCTOR_TYPE == LAST_JS_FUNCTION_TYPE);
+static_assert(JS_CLASS_CONSTRUCTOR_TYPE < FIRST_CALLABLE_JS_FUNCTION_TYPE ||
+ JS_CLASS_CONSTRUCTOR_TYPE > LAST_CALLABLE_JS_FUNCTION_TYPE,
+ "JS_CLASS_CONSTRUCTOR_TYPE must not be in the callable JS "
+ "function type range");
+
// Two ranges don't cleanly follow the inheritance hierarchy. Here we ensure
// that only expected types fall within these ranges.
// - From FIRST_JS_RECEIVER_TYPE to LAST_SPECIAL_RECEIVER_TYPE should correspond
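
The new FIRST_/LAST_CALLABLE_JS_FUNCTION_TYPE constants exist so callers can test "callable JS function" with one range comparison instead of enumerating types; the asserts above pin JS_CLASS_CONSTRUCTOR_TYPE just outside that range. A simplified sketch of the pattern, with a toy enum in place of the real instance-type list:

#include <cstdint>

enum DemoInstanceType : uint16_t {
  JS_FUNCTION_TYPE_DEMO,            // ordinary callable function
  JS_GENERATOR_FUNCTION_TYPE_DEMO,  // another callable function type (toy)
  JS_CLASS_CONSTRUCTOR_TYPE_DEMO,   // must stay last in the JS function range

  FIRST_JS_FUNCTION_TYPE_DEMO = JS_FUNCTION_TYPE_DEMO,
  LAST_JS_FUNCTION_TYPE_DEMO = JS_CLASS_CONSTRUCTOR_TYPE_DEMO,
  FIRST_CALLABLE_JS_FUNCTION_TYPE_DEMO = FIRST_JS_FUNCTION_TYPE_DEMO,
  LAST_CALLABLE_JS_FUNCTION_TYPE_DEMO = JS_CLASS_CONSTRUCTOR_TYPE_DEMO - 1,
};

// One range check covers every callable JS function type.
constexpr bool IsCallableJSFunction(DemoInstanceType t) {
  return t >= FIRST_CALLABLE_JS_FUNCTION_TYPE_DEMO &&
         t <= LAST_CALLABLE_JS_FUNCTION_TYPE_DEMO;
}

static_assert(IsCallableJSFunction(JS_GENERATOR_FUNCTION_TYPE_DEMO),
              "callable types fall inside the range");
static_assert(!IsCallableJSFunction(JS_CLASS_CONSTRUCTOR_TYPE_DEMO),
              "class constructors fall outside the range");

int main() { return 0; }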
diff --git a/chromium/v8/src/objects/intl-objects.cc b/chromium/v8/src/objects/intl-objects.cc
index ac43c319f56..7a3940d3001 100644
--- a/chromium/v8/src/objects/intl-objects.cc
+++ b/chromium/v8/src/objects/intl-objects.cc
@@ -23,7 +23,9 @@
#include "src/objects/js-locale-inl.h"
#include "src/objects/js-locale.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
@@ -181,12 +183,12 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
template <typename T>
MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
Handle<Object> locales, Handle<Object> options,
- const char* method) {
+ const char* method_name) {
Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, map,
JSFunction::GetDerivedMap(isolate, constructor, constructor), T);
- return T::New(isolate, map, locales, options, method);
+ return T::New(isolate, map, locales, options, method_name);
}
} // namespace
@@ -427,7 +429,9 @@ std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberingSystem> numbering_system(
icu::NumberingSystem::createInstance(icu_locale, status));
- if (U_SUCCESS(status)) return numbering_system->getName();
+ if (U_SUCCESS(status) && !numbering_system->isAlgorithmic()) {
+ return numbering_system->getName();
+ }
return "latn";
}
@@ -652,82 +656,6 @@ MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
return receiver;
}
-Maybe<bool> Intl::GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
- const char* property,
- std::vector<const char*> values,
- const char* service,
- std::unique_ptr<char[]>* result) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
-
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(isolate, options, property_str),
- Nothing<bool>());
-
- if (value->IsUndefined(isolate)) {
- return Just(false);
- }
-
- // 2. c. Let value be ? ToString(value).
- Handle<String> value_str;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value_str, Object::ToString(isolate, value), Nothing<bool>());
- std::unique_ptr<char[]> value_cstr = value_str->ToCString();
-
- // 2. d. if values is not undefined, then
- if (values.size() > 0) {
- // 2. d. i. If values does not contain an element equal to value,
- // throw a RangeError exception.
- for (size_t i = 0; i < values.size(); i++) {
- if (strcmp(values.at(i), value_cstr.get()) == 0) {
- // 2. e. return value
- *result = std::move(value_cstr);
- return Just(true);
- }
- }
-
- Handle<String> service_str =
- isolate->factory()->NewStringFromAsciiChecked(service);
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewRangeError(MessageTemplate::kValueOutOfRange, value, service_str,
- property_str),
- Nothing<bool>());
- }
-
- // 2. e. return value
- *result = std::move(value_cstr);
- return Just(true);
-}
-
-V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- const char* service, bool* result) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
-
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(isolate, options, property_str),
- Nothing<bool>());
-
- // 2. If value is not undefined, then
- if (!value->IsUndefined(isolate)) {
- // 2. b. i. Let value be ToBoolean(value).
- *result = value->BooleanValue(isolate);
-
- // 2. e. return value
- return Just(true);
- }
-
- return Just(false);
-}
-
namespace {
bool IsTwoLetterLanguage(const std::string& locale) {
@@ -999,9 +927,9 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
}
}
-MaybeHandle<Object> Intl::StringLocaleCompare(
+base::Optional<int> Intl::StringLocaleCompare(
Isolate* isolate, Handle<String> string1, Handle<String> string2,
- Handle<Object> locales, Handle<Object> options, const char* method) {
+ Handle<Object> locales, Handle<Object> options, const char* method_name) {
// We only cache the instance when locales is a string/undefined and
// options is undefined, as that is the only case when the specified
// side-effects of examining those arguments are unobservable.
@@ -1025,9 +953,9 @@ MaybeHandle<Object> Intl::StringLocaleCompare(
isolate);
Handle<JSCollator> collator;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, collator,
- New<JSCollator>(isolate, constructor, locales, options, method), Object);
+ MaybeHandle<JSCollator> maybe_collator =
+ New<JSCollator>(isolate, constructor, locales, options, method_name);
+ if (!maybe_collator.ToHandle(&collator)) return {};
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultCollator, locales,
@@ -1038,26 +966,19 @@ MaybeHandle<Object> Intl::StringLocaleCompare(
}
// ecma402/#sec-collator-comparestrings
-Handle<Object> Intl::CompareStrings(Isolate* isolate,
- const icu::Collator& icu_collator,
- Handle<String> string1,
- Handle<String> string2) {
- Factory* factory = isolate->factory();
-
+int Intl::CompareStrings(Isolate* isolate, const icu::Collator& icu_collator,
+ Handle<String> string1, Handle<String> string2) {
// Early return for identical strings.
if (string1.is_identical_to(string2)) {
- return factory->NewNumberFromInt(UCollationResult::UCOL_EQUAL);
+ return UCollationResult::UCOL_EQUAL;
}
// Early return for empty strings.
if (string1->length() == 0) {
- return factory->NewNumberFromInt(string2->length() == 0
- ? UCollationResult::UCOL_EQUAL
- : UCollationResult::UCOL_LESS);
- }
- if (string2->length() == 0) {
- return factory->NewNumberFromInt(UCollationResult::UCOL_GREATER);
+ return string2->length() == 0 ? UCollationResult::UCOL_EQUAL
+ : UCollationResult::UCOL_LESS;
}
+ if (string2->length() == 0) return UCollationResult::UCOL_GREATER;
string1 = String::Flatten(isolate, string1);
string2 = String::Flatten(isolate, string2);
@@ -1070,7 +991,7 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
if (!string_piece2.empty()) {
result = icu_collator.compareUTF8(string_piece1, string_piece2, status);
DCHECK(U_SUCCESS(status));
- return factory->NewNumberFromInt(result);
+ return result;
}
}
@@ -1078,8 +999,7 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
icu::UnicodeString string_val2 = Intl::ToICUUnicodeString(isolate, string2);
result = icu_collator.compare(string_val1, string_val2, status);
DCHECK(U_SUCCESS(status));
-
- return factory->NewNumberFromInt(result);
+ return result;
}
// ecma402/#sup-properties-of-the-number-prototype-object
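
With this change Intl::CompareStrings hands back the raw UCollationResult (-1, 0, 1) and lets the caller decide when to box it as a JS number, while StringLocaleCompare signals a pending exception via an empty Optional instead of a MaybeHandle. A hedged sketch of that calling convention, with std::optional and std::string standing in for base::Optional and the V8/ICU types:

#include <optional>
#include <string>

// Toy collation: lexicographic compare standing in for icu::Collator.
int CompareStringsDemo(const std::string& a, const std::string& b) {
  if (a == b) return 0;   // UCOL_EQUAL
  return a < b ? -1 : 1;  // UCOL_LESS / UCOL_GREATER
}

// Empty optional means "an exception is already pending"; a plain int result is
// converted to a JS number only at the builtin boundary.
std::optional<int> StringLocaleCompareDemo(const std::string& a,
                                           const std::string& b,
                                           bool collator_creation_failed) {
  if (collator_creation_failed) return std::nullopt;
  return CompareStringsDemo(a, b);
}

int main() {
  std::optional<int> r = StringLocaleCompareDemo("apple", "banana", false);
  return (r.has_value() && *r < 0) ? 0 : 1;  // "apple" sorts before "banana"
}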
@@ -1087,7 +1007,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<Object> num,
Handle<Object> locales,
Handle<Object> options,
- const char* method) {
+ const char* method_name) {
Handle<Object> numeric_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
Object::ToNumeric(isolate, num), String);
@@ -1117,7 +1037,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
// 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
ASSIGN_RETURN_ON_EXCEPTION(
isolate, number_format,
- New<JSNumberFormat>(isolate, constructor, locales, options, method),
+ New<JSNumberFormat>(isolate, constructor, locales, options, method_name),
String);
if (can_cache) {
@@ -1134,55 +1054,6 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
numeric_obj);
}
-namespace {
-
-// ecma402/#sec-defaultnumberoption
-Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
- int max, int fallback, Handle<String> property) {
- // 2. Else, return fallback.
- if (value->IsUndefined()) return Just(fallback);
-
- // 1. If value is not undefined, then
- // a. Let value be ? ToNumber(value).
- Handle<Object> value_num;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value_num, Object::ToNumber(isolate, value), Nothing<int>());
- DCHECK(value_num->IsNumber());
-
- // b. If value is NaN or less than minimum or greater than maximum, throw a
- // RangeError exception.
- if (value_num->IsNaN() || value_num->Number() < min ||
- value_num->Number() > max) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewRangeError(MessageTemplate::kPropertyValueOutOfRange, property),
- Nothing<int>());
- }
-
- // The max and min arguments are integers and the above check makes
- // sure that we are within the integer range making this double to
- // int conversion safe.
- //
- // c. Return floor(value).
- return Just(FastD2I(floor(value_num->Number())));
-}
-
-} // namespace
-
-// ecma402/#sec-getnumberoption
-Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- Handle<String> property, int min, int max,
- int fallback) {
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, JSReceiver::GetProperty(isolate, options, property),
- Nothing<int>());
-
- // Return ? DefaultNumberOption(value, minimum, maximum, fallback).
- return DefaultNumberOption(isolate, value, min, max, fallback, property);
-}
-
Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
int mxfd_default, bool notation_is_compact) {
@@ -1192,8 +1063,8 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21,
// 1).
int mnid = 1;
- if (!Intl::GetNumberOption(isolate, options,
- factory->minimumIntegerDigits_string(), 1, 21, 1)
+ if (!GetNumberOption(isolate, options, factory->minimumIntegerDigits_string(),
+ 1, 21, 1)
.To(&mnid)) {
return Nothing<NumberFormatDigitOptions>();
}
@@ -1613,7 +1484,7 @@ MaybeHandle<JSArray> CreateArrayFromList(Isolate* isolate,
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
// https://tc39.github.io/ecma402/#sec-supportedlocales
MaybeHandle<JSObject> SupportedLocales(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales, Handle<Object> options) {
std::vector<std::string> supported_locales;
@@ -1622,12 +1493,12 @@ MaybeHandle<JSObject> SupportedLocales(
Handle<JSReceiver> options_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, options_obj,
- Intl::CoerceOptionsToObject(isolate, options, method), JSObject);
+ CoerceOptionsToObject(isolate, options, method_name), JSObject);
// 2. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options_obj, method);
+ Intl::GetLocaleMatcher(isolate, options_obj, method_name);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSObject>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -1666,9 +1537,158 @@ MaybeHandle<JSArray> Intl::GetCanonicalLocales(Isolate* isolate,
return CreateArrayFromList(isolate, maybe_ll.FromJust(), attr);
}
+namespace {
+
+MaybeHandle<JSArray> AvailableCollations(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::Collator::getKeywordValues("collation", status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, "co", enumeration.get(),
+ Intl::RemoveCollation, true);
+}
+
+MaybeHandle<JSArray> VectorToJSArray(Isolate* isolate,
+ const std::vector<std::string>& array) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(array.size()));
+ int32_t index = 0;
+ for (std::string item : array) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSArray> AvailableCurrencies(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ UEnumeration* ids =
+ ucurr_openISOCurrencies(UCURR_COMMON | UCURR_NON_DEPRECATED, &status);
+ const char* next = nullptr;
+ std::vector<std::string> array;
+ while (U_SUCCESS(status) &&
+ (next = uenum_next(ids, nullptr, &status)) != nullptr) {
+ // Work around the issue that we do not support the VEF currency code
+ // in DisplayNames by not reporting it.
+ if (strcmp(next, "VEF") == 0) continue;
+ array.push_back(next);
+ }
+ // Work around the issue that we do support the following currency codes
+ // in DisplayNames but the ICU API does not report them.
+ array.push_back("SVC");
+ array.push_back("VES");
+ array.push_back("XDR");
+ array.push_back("XSU");
+ array.push_back("ZWL");
+ std::sort(array.begin(), array.end());
+ uenum_close(ids);
+ return VectorToJSArray(isolate, array);
+}
+
+MaybeHandle<JSArray> AvailableNumberingSystems(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::NumberingSystem::getAvailableNames(status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ // Need to filter out algorithmic numbering systems.
+ return Intl::ToJSArray(
+ isolate, "nu", enumeration.get(),
+ [](const char* value) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstanceByName(value, status));
+ // Skip algorithmic ones since Chrome filters out the resources.
+ return U_FAILURE(status) || numbering_system->isAlgorithmic();
+ },
+ true);
+}
+
+MaybeHandle<JSArray> AvailableTimeZones(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createTimeZoneIDEnumeration(
+ UCAL_ZONE_TYPE_CANONICAL_LOCATION, nullptr, nullptr, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
+}
+
+MaybeHandle<JSArray> AvailableUnits(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(sanctioned.size()));
+ int32_t index = 0;
+ for (std::string item : sanctioned) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+} // namespace
+
+// ecma-402 #sec-intl.supportedvaluesof
+MaybeHandle<JSArray> Intl::SupportedValuesOf(Isolate* isolate,
+ Handle<Object> key_obj) {
+ Factory* factory = isolate->factory();
+ // 1. Let key be ? ToString(key).
+ Handle<String> key_str;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, key_str,
+ Object::ToString(isolate, key_obj), JSArray);
+ // 2. If key is "calendar", then
+ if (factory->calendar_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCalendars( ).
+ return Intl::AvailableCalendars(isolate);
+ }
+ // 3. Else if key is "collation", then
+ if (factory->collation_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCollations( ).
+ return AvailableCollations(isolate);
+ }
+ // 4. Else if key is "currency", then
+ if (factory->currency_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCurrencies( ).
+ return AvailableCurrencies(isolate);
+ }
+ // 5. Else if key is "numberingSystem", then
+ if (factory->numberingSystem_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableNumberingSystems( ).
+ return AvailableNumberingSystems(isolate);
+ }
+ // 6. Else if key is "timeZone", then
+ if (factory->timeZone_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableTimeZones( ).
+ return AvailableTimeZones(isolate);
+ }
+ // 7. Else if key is "unit", then
+ if (factory->unit_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableUnits( ).
+ return AvailableUnits(isolate);
+ }
+ // 8. Else,
+ // a. Throw a RangeError exception.
+ // 9. Return ! CreateArrayFromList( list ).
+
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("key"), key_str),
+ JSArray);
+}
+
// ECMA 402 Intl.*.supportedLocalesOf
MaybeHandle<JSObject> Intl::SupportedLocalesOf(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales, Handle<Object> locales,
Handle<Object> options) {
// Let availableLocales be %Collator%.[[AvailableLocales]].
@@ -1679,7 +1699,7 @@ MaybeHandle<JSObject> Intl::SupportedLocalesOf(
MAYBE_RETURN(requested_locales, MaybeHandle<JSObject>());
// Return ? SupportedLocales(availableLocales, requestedLocales, options).
- return SupportedLocales(isolate, method, available_locales,
+ return SupportedLocales(isolate, method_name, available_locales,
requested_locales.FromJust(), options);
}
@@ -1739,7 +1759,8 @@ bool Intl::IsValidNumberingSystem(const std::string& value) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberingSystem> numbering_system(
icu::NumberingSystem::createInstanceByName(value.c_str(), status));
- return U_SUCCESS(status) && numbering_system.get() != nullptr;
+ return U_SUCCESS(status) && numbering_system.get() != nullptr &&
+ !numbering_system->isAlgorithmic();
}
namespace {
@@ -2090,20 +2111,20 @@ base::TimezoneCache* Intl::CreateTimeZoneCache() {
Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<Intl::MatcherOption>(
- isolate, options, "localeMatcher", method, {"best fit", "lookup"},
+ const char* method_name) {
+ return GetStringOption<Intl::MatcherOption>(
+ isolate, options, "localeMatcher", method_name, {"best fit", "lookup"},
{Intl::MatcherOption::kBestFit, Intl::MatcherOption::kLookup},
Intl::MatcherOption::kBestFit);
}
Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method,
+ const char* method_name,
std::unique_ptr<char[]>* result) {
const std::vector<const char*> empty_values = {};
- Maybe<bool> maybe = Intl::GetStringOption(isolate, options, "numberingSystem",
- empty_values, method, result);
+ Maybe<bool> maybe = GetStringOption(isolate, options, "numberingSystem",
+ empty_values, method_name, result);
MAYBE_RETURN(maybe, Nothing<bool>());
if (maybe.FromJust() && *result != nullptr) {
if (!IsWellFormedNumberingSystem(result->get())) {
@@ -2212,39 +2233,50 @@ MaybeHandle<String> Intl::FormattedToString(
return Intl::ToString(isolate, result);
}
-// ecma402/#sec-getoptionsobject
-MaybeHandle<JSReceiver> Intl::GetOptionsObject(Isolate* isolate,
- Handle<Object> options,
- const char* service) {
- // 1. If options is undefined, then
- if (options->IsUndefined(isolate)) {
- // a. Return ! ObjectCreate(null).
- return isolate->factory()->NewJSObjectWithNullProto();
- }
- // 2. If Type(options) is Object, then
- if (options->IsJSReceiver()) {
- // a. Return options.
- return Handle<JSReceiver>::cast(options);
- }
- // 3. Throw a TypeError exception.
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
- JSReceiver);
-}
-
-// ecma402/#sec-coerceoptionstoobject
-MaybeHandle<JSReceiver> Intl::CoerceOptionsToObject(Isolate* isolate,
- Handle<Object> options,
- const char* service) {
- // 1. If options is undefined, then
- if (options->IsUndefined(isolate)) {
- // a. Return ! ObjectCreate(null).
- return isolate->factory()->NewJSObjectWithNullProto();
- }
- // 2. Return ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, options, service),
- JSReceiver);
- return Handle<JSReceiver>::cast(options);
+MaybeHandle<JSArray> Intl::ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::vector<std::string> array;
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ if (unicode_key != nullptr) {
+ item = uloc_toUnicodeLocaleType(unicode_key, item);
+ }
+ if (removes == nullptr || !(removes)(item)) {
+ array.push_back(item);
+ }
+ }
+
+ if (sort) {
+ std::sort(array.begin(), array.end());
+ }
+ return VectorToJSArray(isolate, array);
+}
+
+bool Intl::RemoveCollation(const char* collation) {
+ return strcmp("standard", collation) == 0 || strcmp("search", collation) == 0;
+}
+
+// See the list in ecma402 #sec-issanctionedsimpleunitidentifier
+std::set<std::string> Intl::SanctionedSimpleUnits() {
+ return std::set<std::string>({"acre", "bit", "byte",
+ "celsius", "centimeter", "day",
+ "degree", "fahrenheit", "fluid-ounce",
+ "foot", "gallon", "gigabit",
+ "gigabyte", "gram", "hectare",
+ "hour", "inch", "kilobit",
+ "kilobyte", "kilogram", "kilometer",
+ "liter", "megabit", "megabyte",
+ "meter", "mile", "mile-scandinavian",
+ "millimeter", "milliliter", "millisecond",
+ "minute", "month", "ounce",
+ "percent", "petabyte", "pound",
+ "second", "stone", "terabit",
+ "terabyte", "week", "yard",
+ "year"});
}
} // namespace internal
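
Intl::ToJSArray factors one recurring pattern out of the new Available* helpers: walk an ICU enumeration, optionally map each item to its Unicode extension value, drop entries the removes predicate rejects, sort, and materialize the result as an array. A stand-alone sketch of that pattern with std::vector in place of the ICU enumeration and the V8 factory:

#include <algorithm>
#include <cstring>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Analogue of Intl::RemoveCollation: "standard" and "search" are never exposed.
bool RemoveCollationDemo(const char* collation) {
  return std::strcmp(collation, "standard") == 0 ||
         std::strcmp(collation, "search") == 0;
}

std::vector<std::string> ToSortedList(
    const std::vector<const char*>& enumeration,
    const std::function<bool(const char*)>& removes) {
  std::vector<std::string> out;
  for (const char* item : enumeration) {
    if (removes == nullptr || !removes(item)) out.push_back(item);
  }
  std::sort(out.begin(), out.end());
  return out;
}

int main() {
  for (const std::string& c :
       ToSortedList({"standard", "phonebk", "emoji", "search"}, RemoveCollationDemo)) {
    std::cout << c << '\n';  // emoji, phonebk
  }
  return 0;
}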
diff --git a/chromium/v8/src/objects/intl-objects.h b/chromium/v8/src/objects/intl-objects.h
index ec0eb93873b..a696e09410e 100644
--- a/chromium/v8/src/objects/intl-objects.h
+++ b/chromium/v8/src/objects/intl-objects.h
@@ -27,6 +27,7 @@ namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
class FormattedValue;
+class StringEnumeration;
class UnicodeString;
} // namespace U_ICU_NAMESPACE
@@ -61,76 +62,10 @@ class Intl {
static std::string GetNumberingSystem(const icu::Locale& icu_locale);
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> SupportedLocalesOf(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales, Handle<Object> locales_in,
Handle<Object> options_in);
- // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
- // ecma402/#sec-getoption
- //
- // This is specialized for the case when type is string.
- //
- // Instead of passing undefined for the values argument as the spec
- // defines, pass in an empty vector.
- //
- // Returns true if options object has the property and stores the
- // result in value. Returns false if the value is not found. The
- // caller is required to use fallback value appropriately in this
- // case.
- //
- // service is a string denoting the type of Intl object; used when
- // printing the error message.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> GetStringOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- std::vector<const char*> values, const char* service,
- std::unique_ptr<char[]>* result);
-
- // A helper template to get string from option into a enum.
- // The enum in the enum_values is the corresponding value to the strings
- // in the str_values. If the option does not contains name,
- // default_value will be return.
- template <typename T>
- V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* name,
- const char* method, const std::vector<const char*>& str_values,
- const std::vector<T>& enum_values, T default_value) {
- DCHECK_EQ(str_values.size(), enum_values.size());
- std::unique_ptr<char[]> cstr;
- Maybe<bool> found = Intl::GetStringOption(isolate, options, name,
- str_values, method, &cstr);
- MAYBE_RETURN(found, Nothing<T>());
- if (found.FromJust()) {
- DCHECK_NOT_NULL(cstr.get());
- for (size_t i = 0; i < str_values.size(); i++) {
- if (strcmp(cstr.get(), str_values[i]) == 0) {
- return Just(enum_values[i]);
- }
- }
- UNREACHABLE();
- }
- return Just(default_value);
- }
-
- // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
- // ecma402/#sec-getoption
- //
- // This is specialized for the case when type is boolean.
- //
- // Returns true if options object has the property and stores the
- // result in value. Returns false if the value is not found. The
- // caller is required to use fallback value appropriately in this
- // case.
- //
- // service is a string denoting the type of Intl object; used when
- // printing the error message.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> GetBoolOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- const char* service, bool* result);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
- Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
- int min, int max, int fallback);
-
// https://tc39.github.io/ecma402/#sec-canonicalizelocalelist
// {only_return_one_result} is an optimization for callers that only
// care about the first result.
@@ -142,6 +77,10 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetCanonicalLocales(
Isolate* isolate, Handle<Object> locales);
+ // ecma-402 #sec-intl.supportedvaluesof
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> SupportedValuesOf(
+ Isolate* isolate, Handle<Object> key);
+
// For locale sensitive functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
Isolate* isolate, Handle<String> s, bool is_upper,
@@ -153,18 +92,19 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ConvertToLower(
Isolate* isolate, Handle<String> s);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare(
+ V8_WARN_UNUSED_RESULT static base::Optional<int> StringLocaleCompare(
Isolate* isolate, Handle<String> s1, Handle<String> s2,
- Handle<Object> locales, Handle<Object> options, const char* method);
+ Handle<Object> locales, Handle<Object> options, const char* method_name);
- V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings(
- Isolate* isolate, const icu::Collator& collator, Handle<String> s1,
- Handle<String> s2);
+ V8_WARN_UNUSED_RESULT static int CompareStrings(Isolate* isolate,
+ const icu::Collator& collator,
+ Handle<String> s1,
+ Handle<String> s2);
// ecma402/#sup-properties-of-the-number-prototype-object
V8_WARN_UNUSED_RESULT static MaybeHandle<String> NumberToLocaleString(
Isolate* isolate, Handle<Object> num, Handle<Object> locales,
- Handle<Object> options, const char* method);
+ Handle<Object> options, const char* method_name);
// ecma402/#sec-setnfdigitoptions
struct NumberFormatDigitOptions {
@@ -232,11 +172,11 @@ class Intl {
// Shared function to read the "localeMatcher" option.
V8_WARN_UNUSED_RESULT static Maybe<MatcherOption> GetLocaleMatcher(
- Isolate* isolate, Handle<JSReceiver> options, const char* method);
+ Isolate* isolate, Handle<JSReceiver> options, const char* method_name);
// Shared function to read the "numberingSystem" option.
V8_WARN_UNUSED_RESULT static Maybe<bool> GetNumberingSystem(
- Isolate* isolate, Handle<JSReceiver> options, const char* method,
+ Isolate* isolate, Handle<JSReceiver> options, const char* method_name,
std::unique_ptr<char[]>* result);
// Check the calendar is valid or not for that locale.
@@ -331,13 +271,17 @@ class Intl {
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
- // ecma402/#sec-getoptionsobject
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetOptionsObject(
- Isolate* isolate, Handle<Object> options, const char* service);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort);
+
+ static bool RemoveCollation(const char* collation);
+
+ static std::set<std::string> SanctionedSimpleUnits();
- // ecma402/#sec-coerceoptionstoobject
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
- Isolate* isolate, Handle<Object> options, const char* service);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> AvailableCalendars(
+ Isolate* isolate);
};
} // namespace internal
diff --git a/chromium/v8/src/objects/js-array-buffer-inl.h b/chromium/v8/src/objects/js-array-buffer-inl.h
index 7ea8aeb3e58..67ae6b78774 100644
--- a/chromium/v8/src/objects/js-array-buffer-inl.h
+++ b/chromium/v8/src/objects/js-array-buffer-inl.h
@@ -30,10 +30,6 @@ ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
RELEASE_ACQUIRE_ACCESSORS(JSTypedArray, base_pointer, Object,
kBasePointerOffset)
-void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kBackingStoreOffset, isolate);
-}
-
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
}
@@ -43,26 +39,20 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
- kArrayBufferBackingStoreTag);
- return reinterpret_cast<void*>(value);
+ return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
-void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- WriteExternalPointerField(kBackingStoreOffset, isolate,
- reinterpret_cast<Address>(value),
- kArrayBufferBackingStoreTag);
+void JSArrayBuffer::set_backing_store(void* value) {
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kBackingStoreOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
- WriteField<ExternalPointer_t>(kBackingStoreOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kBackingStoreOffset, static_cast<Address>(ref));
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -205,13 +195,13 @@ size_t JSTypedArray::GetLengthOrOutOfBounds(bool& out_of_bounds) const {
if (WasDetached()) return 0;
if (is_length_tracking()) {
if (is_backed_by_rab()) {
- if (byte_offset() >= buffer().byte_length()) {
+ if (byte_offset() > buffer().byte_length()) {
out_of_bounds = true;
return 0;
}
return (buffer().byte_length() - byte_offset()) / element_size();
}
- if (byte_offset() >=
+ if (byte_offset() >
buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst)) {
out_of_bounds = true;
return 0;
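
The two >= checks above become >, which makes a length-tracking view whose byte_offset lands exactly on the buffer's end an in-bounds, zero-length view rather than an out-of-bounds one. A worked check of that boundary with plain sizes (no V8 objects):

#include <cassert>
#include <cstddef>

// Mirrors the corrected comparison: only an offset strictly past the buffer end
// is out of bounds; an offset equal to the end yields an empty, valid view.
size_t LengthTrackingLength(size_t buffer_byte_length, size_t byte_offset,
                            size_t element_size, bool& out_of_bounds) {
  if (byte_offset > buffer_byte_length) {
    out_of_bounds = true;
    return 0;
  }
  out_of_bounds = false;
  return (buffer_byte_length - byte_offset) / element_size;
}

int main() {
  bool oob = false;
  assert(LengthTrackingLength(16, 8, 4, oob) == 2 && !oob);   // ordinary case
  assert(LengthTrackingLength(16, 16, 4, oob) == 0 && !oob);  // offset == end: empty, in bounds
  LengthTrackingLength(16, 20, 4, oob);
  assert(oob);                                                // offset past end: out of bounds
  return 0;
}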
@@ -238,10 +228,6 @@ size_t JSTypedArray::GetLength() const {
return GetLengthOrOutOfBounds(out_of_bounds);
}
-void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kExternalPointerOffset, isolate);
-}
-
size_t JSTypedArray::length() const {
DCHECK(!is_length_tracking());
DCHECK(!is_backed_by_rab());
@@ -257,18 +243,16 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return ReadExternalPointerField(kExternalPointerOffset, isolate,
- kTypedArrayExternalPointerTag);
+ return ReadField<Address>(kExternalPointerOffset);
}
-DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
- return ReadField<ExternalPointer_t>(kExternalPointerOffset);
+DEF_GETTER(JSTypedArray, external_pointer_raw, Address) {
+ return ReadField<Address>(kExternalPointerOffset);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- WriteExternalPointerField(kExternalPointerOffset, isolate, value,
- kTypedArrayExternalPointerTag);
+ DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
+ WriteField<Address>(kExternalPointerOffset, value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -282,14 +266,12 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
DCHECK(!is_on_heap());
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kExternalPointerOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kExternalPointerOffset));
}
void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
DCHECK(!is_on_heap());
- WriteField<ExternalPointer_t>(kExternalPointerOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kExternalPointerOffset, static_cast<Address>(ref));
}
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
@@ -390,19 +372,12 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return reinterpret_cast<void*>(ReadExternalPointerField(
- kDataPointerOffset, isolate, kDataViewDataPointerTag));
-}
-
-void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kDataPointerOffset, isolate);
+ return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- WriteExternalPointerField(kDataPointerOffset, isolate,
- reinterpret_cast<Address>(value),
- kDataViewDataPointerTag);
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
}
} // namespace internal
diff --git a/chromium/v8/src/objects/js-array-buffer.cc b/chromium/v8/src/objects/js-array-buffer.cc
index 917a055b466..fd9f3133a5f 100644
--- a/chromium/v8/src/objects/js-array-buffer.cc
+++ b/chromium/v8/src/objects/js-array-buffer.cc
@@ -55,9 +55,8 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
SetEmbedderField(i, Smi::zero());
}
set_extension(nullptr);
- AllocateExternalPointerEntries(GetIsolate());
if (!backing_store) {
- set_backing_store(GetIsolate(), nullptr);
+ set_backing_store(nullptr);
set_byte_length(0);
set_max_byte_length(0);
} else {
@@ -77,24 +76,23 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
!backing_store->is_wasm_memory() && !backing_store->is_resizable(),
backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached());
- Isolate* isolate = GetIsolate();
- set_backing_store(isolate, backing_store->buffer_start());
+ set_backing_store(backing_store->buffer_start());
if (is_shared() && is_resizable()) {
// GSABs need to read their byte_length from the BackingStore. Maintain the
// invariant that their byte_length field is always 0.
set_byte_length(0);
} else {
+ CHECK_LE(backing_store->byte_length(), kMaxByteLength);
set_byte_length(backing_store->byte_length());
}
set_max_byte_length(backing_store->max_byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
- Heap* heap = isolate->heap();
ArrayBufferExtension* extension = EnsureExtension();
size_t bytes = backing_store->PerIsolateAccountingLength();
extension->set_accounting_length(bytes);
extension->set_backing_store(std::move(backing_store));
- heap->AppendArrayBufferExtension(*this, extension);
+ GetIsolate()->heap()->AppendArrayBufferExtension(*this, extension);
}
void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
@@ -123,14 +121,25 @@ void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
DCHECK(!is_shared());
DCHECK(!is_asmjs_memory());
- set_backing_store(isolate, nullptr);
+ set_backing_store(nullptr);
set_byte_length(0);
set_was_detached(true);
}
-std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() {
- if (!extension()) return nullptr;
- return extension()->backing_store();
+std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() const {
+ if (!extension()) return nullptr;
+ return extension()->backing_store();
+}
+
+size_t JSArrayBuffer::GetByteLength() const {
+ if V8_UNLIKELY (is_shared() && is_resizable()) {
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, byte_length());
+
+ return GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ }
+ return byte_length();
}
ArrayBufferExtension* JSArrayBuffer::EnsureExtension() {
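
The new GetByteLength encodes the invariant stated above: a growable SharedArrayBuffer keeps its byte_length field at 0, and the live length is read from the BackingStore with seq-cst ordering. A simplified model of that split, with std::atomic standing in for the BackingStore length and toy structs for the V8 objects:

#include <atomic>
#include <cassert>
#include <cstddef>

struct DemoBackingStore {
  std::atomic<size_t> byte_length{0};
};

struct DemoArrayBuffer {
  bool shared = false;
  bool resizable = false;
  size_t byte_length = 0;  // invariant: stays 0 when shared && resizable
  DemoBackingStore* store = nullptr;

  size_t GetByteLength() const {
    if (shared && resizable) {
      assert(byte_length == 0);  // GSABs never cache a length on the JS object
      return store->byte_length.load(std::memory_order_seq_cst);
    }
    return byte_length;
  }
};

int main() {
  DemoBackingStore store;
  store.byte_length.store(4096, std::memory_order_seq_cst);

  DemoArrayBuffer gsab;
  gsab.shared = true;
  gsab.resizable = true;
  gsab.store = &store;

  assert(gsab.GetByteLength() == 4096);  // length comes from the backing store
  return 0;
}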
diff --git a/chromium/v8/src/objects/js-array-buffer.h b/chromium/v8/src/objects/js-array-buffer.h
index 1522f4b9513..dadc85659b7 100644
--- a/chromium/v8/src/objects/js-array-buffer.h
+++ b/chromium/v8/src/objects/js-array-buffer.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
+#include "include/v8-typed-array.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
#include "torque-generated/bit-fields.h"
@@ -32,18 +33,12 @@ class JSArrayBuffer
static constexpr size_t kMaxByteLength = kMaxSafeInteger;
#endif
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSArrayBuffer's fields that require soft sandbox protection (backing
- // store pointer, backing store length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// [byte_length]: length in bytes
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
// [backing_store]: backing memory for this array
DECL_GETTER(backing_store, void*)
- inline void set_backing_store(Isolate* isolate, void* value);
+ inline void set_backing_store(void* value);
// [extension]: extension object used for GC
DECL_PRIMITIVE_ACCESSORS(extension, ArrayBufferExtension*)
@@ -109,7 +104,9 @@ class JSArrayBuffer
// Get a reference to backing store of this array buffer, if there is a
// backing store. Returns nullptr if there is no backing store (e.g. detached
// or a zero-length array buffer).
- std::shared_ptr<BackingStore> GetBackingStore();
+ std::shared_ptr<BackingStore> GetBackingStore() const;
+
+ size_t GetByteLength() const;
// Allocates an ArrayBufferExtension for this array buffer, unless it is
// already associated with an extension.
@@ -165,52 +162,27 @@ class JSArrayBuffer
// extension-object. The GC periodically iterates all extensions concurrently
// and frees unmarked ones.
// https://docs.google.com/document/d/1-ZrLdlFX1nXT3z-FAgLbKal1gI8Auiaya_My-a0UJ28/edit
-class ArrayBufferExtension : public Malloced {
- enum class GcState : uint8_t { Dead = 0, Copied, Promoted };
-
- std::atomic<bool> marked_;
- std::atomic<GcState> young_gc_state_;
- std::shared_ptr<BackingStore> backing_store_;
- ArrayBufferExtension* next_;
- std::atomic<size_t> accounting_length_;
-
- GcState young_gc_state() {
- return young_gc_state_.load(std::memory_order_relaxed);
- }
-
- void set_young_gc_state(GcState value) {
- young_gc_state_.store(value, std::memory_order_relaxed);
- }
-
+class ArrayBufferExtension final : public Malloced {
public:
- ArrayBufferExtension()
- : marked_(false),
- young_gc_state_(GcState::Dead),
- backing_store_(std::shared_ptr<BackingStore>()),
- next_(nullptr),
- accounting_length_(0) {}
+ ArrayBufferExtension() : backing_store_(std::shared_ptr<BackingStore>()) {}
explicit ArrayBufferExtension(std::shared_ptr<BackingStore> backing_store)
- : marked_(false),
- young_gc_state_(GcState::Dead),
- backing_store_(backing_store),
- next_(nullptr),
- accounting_length_(0) {}
+ : backing_store_(backing_store) {}
void Mark() { marked_.store(true, std::memory_order_relaxed); }
void Unmark() { marked_.store(false, std::memory_order_relaxed); }
- bool IsMarked() { return marked_.load(std::memory_order_relaxed); }
+ bool IsMarked() const { return marked_.load(std::memory_order_relaxed); }
void YoungMark() { set_young_gc_state(GcState::Copied); }
void YoungMarkPromoted() { set_young_gc_state(GcState::Promoted); }
void YoungUnmark() { set_young_gc_state(GcState::Dead); }
- bool IsYoungMarked() { return young_gc_state() != GcState::Dead; }
+ bool IsYoungMarked() const { return young_gc_state() != GcState::Dead; }
- bool IsYoungPromoted() { return young_gc_state() == GcState::Promoted; }
+ bool IsYoungPromoted() const { return young_gc_state() == GcState::Promoted; }
std::shared_ptr<BackingStore> backing_store() { return backing_store_; }
BackingStore* backing_store_raw() { return backing_store_.get(); }
- size_t accounting_length() {
+ size_t accounting_length() const {
return accounting_length_.load(std::memory_order_relaxed);
}
@@ -232,8 +204,25 @@ class ArrayBufferExtension : public Malloced {
void reset_backing_store() { backing_store_.reset(); }
- ArrayBufferExtension* next() { return next_; }
+ ArrayBufferExtension* next() const { return next_; }
void set_next(ArrayBufferExtension* extension) { next_ = extension; }
+
+ private:
+ enum class GcState : uint8_t { Dead = 0, Copied, Promoted };
+
+ std::atomic<bool> marked_{false};
+ std::atomic<GcState> young_gc_state_{GcState::Dead};
+ std::shared_ptr<BackingStore> backing_store_;
+ ArrayBufferExtension* next_ = nullptr;
+ std::atomic<size_t> accounting_length_{0};
+
+ GcState young_gc_state() const {
+ return young_gc_state_.load(std::memory_order_relaxed);
+ }
+
+ void set_young_gc_state(GcState value) {
+ young_gc_state_.store(value, std::memory_order_relaxed);
+ }
};
class JSArrayBufferView
@@ -283,12 +272,6 @@ class JSTypedArray
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSTypedArray's fields that require soft sandbox protection (external
- // pointer, offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// The `DataPtr` is `base_ptr + external_pointer`, and `base_ptr` is nullptr
// for off-heap typed arrays.
static constexpr bool kOffHeapDataPtrEqualsExternalPointer = true;
@@ -392,12 +375,6 @@ class JSDataView
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSDataView's fields that require soft sandbox protection (data pointer,
- // offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
diff --git a/chromium/v8/src/objects/js-array-inl.h b/chromium/v8/src/objects/js-array-inl.h
index ed7ab4e003f..3b9f7962637 100644
--- a/chromium/v8/src/objects/js-array-inl.h
+++ b/chromium/v8/src/objects/js-array-inl.h
@@ -15,11 +15,10 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSArray, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator, JSObject)
+#include "torque-generated/src/objects/js-array-tq-inl.inc"
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator)
DEF_GETTER(JSArray, length, Object) {
return TaggedField<Object, kLengthOffset>::load(cage_base, *this);
@@ -70,9 +69,6 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
return map().prototype() == *isolate->initial_array_prototype();
}
-ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
-ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
-
SMI_ACCESSORS(JSArrayIterator, raw_kind, kKindOffset)
IterationKind JSArrayIterator::kind() const {
diff --git a/chromium/v8/src/objects/js-array.h b/chromium/v8/src/objects/js-array.h
index 776cb4446b4..2cd2e3f3092 100644
--- a/chromium/v8/src/objects/js-array.h
+++ b/chromium/v8/src/objects/js-array.h
@@ -8,7 +8,6 @@
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,12 +15,14 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-array-tq.inc"
+
// The JSArray describes JavaScript Arrays
// Such an array can be in one of two modes:
// - fast, backing storage is a FixedArray and length <= elements.length();
// Please note: push and pop can be used to grow and shrink the array.
// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray : public JSObject {
+class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
public:
// [length]: The length property.
DECL_ACCESSORS(length, Object)
@@ -109,8 +110,6 @@ class JSArray : public JSObject {
// to Proxies and objects with a hidden prototype.
inline bool HasArrayPrototype(Isolate* isolate);
- DECL_CAST(JSArray)
-
// Dispatched behavior.
DECL_PRINTER(JSArray)
DECL_VERIFIER(JSArray)
@@ -118,9 +117,6 @@ class JSArray : public JSObject {
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_FIELDS)
-
static const int kLengthDescriptorIndex = 0;
// Max. number of elements being copied in Array builtins.
@@ -144,7 +140,7 @@ class JSArray : public JSObject {
AllocationMemento::kSize) >>
kDoubleSizeLog2;
- OBJECT_CONSTRUCTORS(JSArray, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArray)
};
Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
@@ -153,52 +149,20 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
// defined in ES section #sec-array-iterator-objects.
-class JSArrayIterator : public JSObject {
+class JSArrayIterator
+ : public TorqueGeneratedJSArrayIterator<JSArrayIterator, JSObject> {
public:
DECL_PRINTER(JSArrayIterator)
DECL_VERIFIER(JSArrayIterator)
- DECL_CAST(JSArrayIterator)
-
- // [iterated_object]: the [[IteratedObject]] inobject property.
- DECL_ACCESSORS(iterated_object, Object)
-
- // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
- // The next_index is always a positive integer, and it points to
- // the next index that is to be returned by this iterator. It's
- // possible range is fixed depending on the [[iterated_object]]:
- //
- // 1. For JSArray's the next_index is always in Unsigned32
- // range, and when the iterator reaches the end it's set
- // to kMaxUInt32 to indicate that this iterator should
- // never produce values anymore even if the "length"
- // property of the JSArray changes at some later point.
- // 2. For JSTypedArray's the next_index is always in
- // UnsignedSmall range, and when the iterator terminates
- // it's set to Smi::kMaxValue.
- // 3. For all other JSReceiver's it's always between 0 and
- // kMaxSafeInteger, and the latter value is used to mark
- // termination.
- //
- // It's important that for 1. and 2. the value fits into the
- // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
- // since we use this knowledge in the fast-path for the array
- // iterator next calls in TurboFan (in the JSCallReducer) to
- // keep the index in Word32 representation. This invariant is
- // checked in JSArrayIterator::JSArrayIteratorVerify().
- DECL_ACCESSORS(next_index, Object)
-
// [kind]: the [[ArrayIterationKind]] inobject property.
inline IterationKind kind() const;
inline void set_kind(IterationKind kind);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_ITERATOR_FIELDS)
-
private:
DECL_INT_ACCESSORS(raw_kind)
- OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArrayIterator)
};
} // namespace internal
diff --git a/chromium/v8/src/objects/js-array.tq b/chromium/v8/src/objects/js-array.tq
index 3ccf37b1506..7e6103293e6 100644
--- a/chromium/v8/src/objects/js-array.tq
+++ b/chromium/v8/src/objects/js-array.tq
@@ -4,10 +4,34 @@
extern enum IterationKind extends uint31 { kKeys, kValues, kEntries }
-@doNotGenerateCppClass
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
+
+ // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
+ // The next_index is always a positive integer, and it points to
+ // the next index that is to be returned by this iterator. Its
+ // possible range is fixed depending on the [[iterated_object]]:
+ //
+ // 1. For JSArray's the next_index is always in Unsigned32
+ // range, and when the iterator reaches the end it's set
+ // to kMaxUInt32 to indicate that this iterator should
+ // never produce values anymore even if the "length"
+ // property of the JSArray changes at some later point.
+ // 2. For JSTypedArray's the next_index is always in
+ // UnsignedSmall range, and when the iterator terminates
+ // it's set to Smi::kMaxValue.
+ // 3. For all other JSReceiver's it's always between 0 and
+ // kMaxSafeInteger, and the latter value is used to mark
+ // termination.
+ //
+ // It's important that for 1. and 2. the value fits into the
+ // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
+ // since we use this knowledge in the fast-path for the array
+ // iterator next calls in TurboFan (in the JSCallReducer) to
+ // keep the index in Word32 representation. This invariant is
+ // checked in JSArrayIterator::JSArrayIteratorVerify().
next_index: Number;
+
kind: SmiTagged<IterationKind>;
}
@@ -25,7 +49,6 @@ macro CreateArrayIterator(implicit context: NativeContext)(
};
}
-@doNotGenerateCppClass
extern class JSArray extends JSObject {
macro IsEmpty(): bool {
return this.length == 0;
@@ -164,7 +187,7 @@ struct FastJSArrayWitness {
return this.unstable;
}
- macro Recheck() labels CastError {
+ macro Recheck(): void labels CastError {
if (this.stable.map != this.map) goto CastError;
// We don't need to check elements kind or whether the prototype
// has changed away from the default JSArray prototype, because
@@ -187,7 +210,7 @@ struct FastJSArrayWitness {
}
}
- macro StoreHole(k: Smi) {
+ macro StoreHole(k: Smi): void {
if (this.hasDoubles) {
const elements = Cast<FixedDoubleArray>(this.unstable.elements)
otherwise unreachable;
@@ -207,19 +230,19 @@ struct FastJSArrayWitness {
}
}
- macro EnsureArrayPushable(implicit context: Context)() labels Failed {
+ macro EnsureArrayPushable(implicit context: Context)(): void labels Failed {
EnsureArrayPushable(this.map) otherwise Failed;
array::EnsureWriteableFastElements(this.unstable);
this.arrayIsPushable = true;
}
- macro ChangeLength(newLength: Smi) {
- assert(this.arrayIsPushable);
+ macro ChangeLength(newLength: Smi): void {
+ dcheck(this.arrayIsPushable);
this.unstable.length = newLength;
}
- macro Push(value: JSAny) labels Failed {
- assert(this.arrayIsPushable);
+ macro Push(value: JSAny): void labels Failed {
+ dcheck(this.arrayIsPushable);
if (this.hasDoubles) {
BuildAppendJSArray(
ElementsKind::HOLEY_DOUBLE_ELEMENTS, this.unstable, value)
@@ -228,7 +251,7 @@ struct FastJSArrayWitness {
BuildAppendJSArray(ElementsKind::HOLEY_SMI_ELEMENTS, this.unstable, value)
otherwise Failed;
} else {
- assert(
+ dcheck(
this.map.elements_kind == ElementsKind::HOLEY_ELEMENTS ||
this.map.elements_kind == ElementsKind::PACKED_ELEMENTS);
BuildAppendJSArray(ElementsKind::HOLEY_ELEMENTS, this.unstable, value)
@@ -236,8 +259,8 @@ struct FastJSArrayWitness {
}
}
- macro MoveElements(dst: intptr, src: intptr, length: intptr) {
- assert(this.arrayIsPushable);
+ macro MoveElements(dst: intptr, src: intptr, length: intptr): void {
+ dcheck(this.arrayIsPushable);
if (this.hasDoubles) {
const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(this.unstable.elements)
@@ -280,7 +303,7 @@ struct FastJSArrayForReadWitness {
return this.unstable;
}
- macro Recheck() labels CastError {
+ macro Recheck(): void labels CastError {
if (this.stable.map != this.map) goto CastError;
// We don't need to check elements kind or whether the prototype
// has changed away from the default JSArray prototype, because
diff --git a/chromium/v8/src/objects/js-break-iterator.cc b/chromium/v8/src/objects/js-break-iterator.cc
index d8794b02ce4..c9558d5c78d 100644
--- a/chromium/v8/src/objects/js-break-iterator.cc
+++ b/chromium/v8/src/objects/js-break-iterator.cc
@@ -10,6 +10,8 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -56,7 +58,7 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
// Extract type from options
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service,
{"word", "character", "sentence", "line"},
{Type::WORD, Type::CHARACTER, Type::SENTENCE, Type::LINE}, Type::WORD);
diff --git a/chromium/v8/src/objects/js-break-iterator.h b/chromium/v8/src/objects/js-break-iterator.h
index 92104084add..6fc02c856b7 100644
--- a/chromium/v8/src/objects/js-break-iterator.h
+++ b/chromium/v8/src/objects/js-break-iterator.h
@@ -15,7 +15,6 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/chromium/v8/src/objects/js-collator.cc b/chromium/v8/src/objects/js-collator.cc
index be3541f29d1..dea0ce04225 100644
--- a/chromium/v8/src/objects/js-collator.cc
+++ b/chromium/v8/src/objects/js-collator.cc
@@ -11,7 +11,9 @@
#include "src/execution/isolate.h"
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-locale.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/strenum.h"
@@ -42,9 +44,9 @@ enum class Sensitivity {
enum class CaseFirst { kUndefined, kUpper, kLower, kFalse };
Maybe<CaseFirst> GetCaseFirst(Isolate* isolate, Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<CaseFirst>(
- isolate, options, "caseFirst", method, {"upper", "lower", "false"},
+ const char* method_name) {
+ return GetStringOption<CaseFirst>(
+ isolate, options, "caseFirst", method_name, {"upper", "lower", "false"},
{CaseFirst::kUpper, CaseFirst::kLower, CaseFirst::kFalse},
CaseFirst::kUndefined);
}
@@ -286,12 +288,12 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 2. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service), JSCollator);
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
+ JSCollator);
// 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
// "search" », "sort").
- Maybe<Usage> maybe_usage = Intl::GetStringOption<Usage>(
+ Maybe<Usage> maybe_usage = GetStringOption<Usage>(
isolate, options, "usage", service, {"sort", "search"},
{Usage::SORT, Usage::SEARCH}, Usage::SORT);
MAYBE_RETURN(maybe_usage, MaybeHandle<JSCollator>());
@@ -309,7 +311,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// *undefined*, *undefined*).
std::unique_ptr<char[]> collation_str = nullptr;
const std::vector<const char*> empty_values = {};
- Maybe<bool> maybe_collation = Intl::GetStringOption(
+ Maybe<bool> maybe_collation = GetStringOption(
isolate, options, "collation", empty_values, service, &collation_str);
MAYBE_RETURN(maybe_collation, MaybeHandle<JSCollator>());
// x. If _collation_ is not *undefined*, then
@@ -334,13 +336,13 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// a. Let numeric be ! ToString(numeric).
//
// Note: We omit the ToString(numeric) operation as it's not
- // observable. Intl::GetBoolOption returns a Boolean and
+ // observable. GetBoolOption returns a Boolean and
// ToString(Boolean) is not side-effecting.
//
// 13. Set opt.[[kn]] to numeric.
bool numeric;
Maybe<bool> found_numeric =
- Intl::GetBoolOption(isolate, options, "numeric", service, &numeric);
+ GetBoolOption(isolate, options, "numeric", service, &numeric);
MAYBE_RETURN(found_numeric, MaybeHandle<JSCollator>());
// 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
@@ -477,12 +479,12 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 24. Let sensitivity be ? GetOption(options, "sensitivity",
// "string", « "base", "accent", "case", "variant" », undefined).
- Maybe<Sensitivity> maybe_sensitivity = Intl::GetStringOption<Sensitivity>(
- isolate, options, "sensitivity", service,
- {"base", "accent", "case", "variant"},
- {Sensitivity::kBase, Sensitivity::kAccent, Sensitivity::kCase,
- Sensitivity::kVariant},
- Sensitivity::kUndefined);
+ Maybe<Sensitivity> maybe_sensitivity =
+ GetStringOption<Sensitivity>(isolate, options, "sensitivity", service,
+ {"base", "accent", "case", "variant"},
+ {Sensitivity::kBase, Sensitivity::kAccent,
+ Sensitivity::kCase, Sensitivity::kVariant},
+ Sensitivity::kUndefined);
MAYBE_RETURN(maybe_sensitivity, MaybeHandle<JSCollator>());
Sensitivity sensitivity = maybe_sensitivity.FromJust();
@@ -518,7 +520,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 27. Let ignorePunctuation be ? GetOption(options,
// "ignorePunctuation", "boolean", undefined, false).
bool ignore_punctuation;
- Maybe<bool> found_ignore_punctuation = Intl::GetBoolOption(
+ Maybe<bool> found_ignore_punctuation = GetBoolOption(
isolate, options, "ignorePunctuation", service, &ignore_punctuation);
MAYBE_RETURN(found_ignore_punctuation, MaybeHandle<JSCollator>());
diff --git a/chromium/v8/src/objects/js-date-time-format-inl.h b/chromium/v8/src/objects/js-date-time-format-inl.h
index 6e24da0589e..8c93a8eeb63 100644
--- a/chromium/v8/src/objects/js-date-time-format-inl.h
+++ b/chromium/v8/src/objects/js-date-time-format-inl.h
@@ -28,7 +28,7 @@ ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
Managed<icu::DateIntervalFormat>, kIcuDateIntervalFormatOffset)
-BOOL_ACCESSORS(JSDateTimeFormat, flags, iso8601, Iso8601Bit::kShift)
+BOOL_ACCESSORS(JSDateTimeFormat, flags, alt_calendar, AltCalendarBit::kShift)
inline void JSDateTimeFormat::set_hour_cycle(HourCycle hour_cycle) {
int hints = flags();
diff --git a/chromium/v8/src/objects/js-date-time-format.cc b/chromium/v8/src/objects/js-date-time-format.cc
index 7e2ece76a91..2258a1ffdf9 100644
--- a/chromium/v8/src/objects/js-date-time-format.cc
+++ b/chromium/v8/src/objects/js-date-time-format.cc
@@ -20,7 +20,8 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format-inl.h"
-
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/calendar.h"
#include "unicode/dtitvfmt.h"
#include "unicode/dtptngen.h"
@@ -76,9 +77,9 @@ JSDateTimeFormat::HourCycle ToHourCycle(UDateFormatHourCycle hc) {
Maybe<JSDateTimeFormat::HourCycle> GetHourCycle(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<JSDateTimeFormat::HourCycle>(
- isolate, options, "hourCycle", method, {"h11", "h12", "h23", "h24"},
+ const char* method_name) {
+ return GetStringOption<JSDateTimeFormat::HourCycle>(
+ isolate, options, "hourCycle", method_name, {"h11", "h12", "h23", "h24"},
{JSDateTimeFormat::HourCycle::kH11, JSDateTimeFormat::HourCycle::kH12,
JSDateTimeFormat::HourCycle::kH23, JSDateTimeFormat::HourCycle::kH24},
JSDateTimeFormat::HourCycle::kUndefined);
@@ -525,13 +526,17 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
// and
// http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
if (calendar_str == "gregorian") {
- if (date_time_format->iso8601()) {
+ if (date_time_format->alt_calendar()) {
calendar_str = "iso8601";
} else {
calendar_str = "gregory";
}
} else if (calendar_str == "ethiopic-amete-alem") {
calendar_str = "ethioaa";
+ } else if (calendar_str == "islamic") {
+ if (date_time_format->alt_calendar()) {
+ calendar_str = "islamic-rgsa";
+ }
}
const icu::TimeZone& tz = calendar->getTimeZone();
@@ -771,7 +776,7 @@ Isolate::ICUObjectCacheType ConvertToCacheType(
MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* method) {
+ const char* method_name) {
Isolate::ICUObjectCacheType cache_type = ConvertToCacheType(defaults);
Factory* factory = isolate->factory();
@@ -821,7 +826,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, date_time_format,
- JSDateTimeFormat::New(isolate, map, locales, internal_options, method),
+ JSDateTimeFormat::New(isolate, map, locales, internal_options,
+ method_name),
String);
if (can_cache) {
@@ -1499,7 +1505,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
const std::vector<const char*> empty_values = {};
// 6. Let calendar be ? GetOption(options, "calendar",
// "string", undefined, undefined).
- Maybe<bool> maybe_calendar = Intl::GetStringOption(
+ Maybe<bool> maybe_calendar = GetStringOption(
isolate, options, "calendar", empty_values, service, &calendar_str);
MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
if (maybe_calendar.FromJust() && calendar_str != nullptr) {
@@ -1523,7 +1529,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// undefined).
bool hour12;
Maybe<bool> maybe_get_hour12 =
- Intl::GetBoolOption(isolate, options, "hour12", service, &hour12);
+ GetBoolOption(isolate, options, "hour12", service, &hour12);
MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>());
// 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
@@ -1588,7 +1594,9 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status);
DCHECK(U_SUCCESS(status));
}
- bool iso8601 = strstr(icu_locale.getName(), "calendar=iso8601") != nullptr;
+ bool alt_calendar =
+ strstr(icu_locale.getName(), "calendar=iso8601") != nullptr ||
+ strstr(icu_locale.getName(), "calendar=islamic-rgsa") != nullptr;
if (numbering_system_str != nullptr &&
Intl::IsValidNumberingSystem(numbering_system_str.get())) {
@@ -1651,7 +1659,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 17. Let timeZone be ? Get(options, "timeZone").
std::unique_ptr<char[]> timezone = nullptr;
- Maybe<bool> maybe_timezone = Intl::GetStringOption(
+ Maybe<bool> maybe_timezone = GetStringOption(
isolate, options, "timeZone", empty_values, service, &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
@@ -1689,7 +1697,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
if (item.property == "timeZoneName") {
// Let _value_ be ? GetNumberOption(options, "fractionalSecondDigits", 1,
// 3, *undefined*). The *undefined* is represented by value 0 here.
- Maybe<int> maybe_fsd = Intl::GetNumberOption(
+ Maybe<int> maybe_fsd = GetNumberOption(
isolate, options, factory->fractionalSecondDigits_string(), 1, 3, 0);
MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
// Convert fractionalSecondDigits to skeleton.
@@ -1703,8 +1711,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// ii. Let value be ? GetOption(options, prop, "string", « the strings
// given in the Values column of the row », undefined).
Maybe<bool> maybe_get_option =
- Intl::GetStringOption(isolate, options, item.property.c_str(),
- item.allowed_values, service, &input);
+ GetStringOption(isolate, options, item.property.c_str(),
+ item.allowed_values, service, &input);
MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
if (maybe_get_option.FromJust()) {
if (item.property == "hour") {
@@ -1724,7 +1732,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// c. Let matcher be ? GetOption(options, "formatMatcher", "string",
// « "basic", "best fit" », "best fit").
Maybe<FormatMatcherOption> maybe_format_matcher =
- Intl::GetStringOption<FormatMatcherOption>(
+ GetStringOption<FormatMatcherOption>(
isolate, options, "formatMatcher", service, {"best fit", "basic"},
{FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
FormatMatcherOption::kBestFit);
@@ -1734,7 +1742,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 32. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
// "full", "long", "medium", "short" », undefined).
- Maybe<DateTimeStyle> maybe_date_style = Intl::GetStringOption<DateTimeStyle>(
+ Maybe<DateTimeStyle> maybe_date_style = GetStringOption<DateTimeStyle>(
isolate, options, "dateStyle", service,
{"full", "long", "medium", "short"},
{DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
@@ -1746,7 +1754,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 34. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
// "full", "long", "medium", "short" »).
- Maybe<DateTimeStyle> maybe_time_style = Intl::GetStringOption<DateTimeStyle>(
+ Maybe<DateTimeStyle> maybe_time_style = GetStringOption<DateTimeStyle>(
isolate, options, "timeStyle", service,
{"full", "long", "medium", "short"},
{DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
@@ -1896,7 +1904,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
date_time_format->set_time_style(time_style);
}
date_time_format->set_hour_cycle(dateTimeFormatHourCycle);
- date_time_format->set_iso8601(iso8601);
+ date_time_format->set_alt_calendar(alt_calendar);
date_time_format->set_locale(*locale_str);
date_time_format->set_icu_locale(*managed_locale);
date_time_format->set_icu_simple_date_format(*managed_format);
@@ -2209,8 +2217,8 @@ template <typename T>
MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&,
- bool*),
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&,
+ bool*)>& formatToResult,
bool* outputRange) {
// Track the newer features formatRange and formatRangeToParts
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
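
FormatRangeCommon now takes the formatter callback as a std::function instead of a plain function pointer (FormatListCommon further down in this patch gets the same treatment). A minimal sketch of why that matters, using hypothetical helper names: a std::function parameter can also bind capturing lambdas, which a function-pointer parameter cannot.

    #include <functional>
    #include <iostream>

    // Hypothetical callback-taking helpers illustrating the difference.
    void TakesFunctionPointer(int (*cb)(int)) { std::cout << cb(1) << '\n'; }
    void TakesStdFunction(const std::function<int(int)>& cb) {
      std::cout << cb(1) << '\n';
    }

    int main() {
      int offset = 41;
      auto capturing = [offset](int x) { return x + offset; };

      TakesFunctionPointer([](int x) { return x + 41; });  // capture-free: OK
      // TakesFunctionPointer(capturing);  // would not compile: lambda captures
      TakesStdFunction(capturing);         // std::function binds it fine
      return 0;
    }
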
diff --git a/chromium/v8/src/objects/js-date-time-format.h b/chromium/v8/src/objects/js-date-time-format.h
index 335d80a2dbc..9c5b2f9dc85 100644
--- a/chromium/v8/src/objects/js-date-time-format.h
+++ b/chromium/v8/src/objects/js-date-time-format.h
@@ -16,7 +16,6 @@
#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
-#include "torque-generated/field-offsets.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -82,7 +81,7 @@ class JSDateTimeFormat
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* method);
+ const char* method_name);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
@@ -128,7 +127,7 @@ class JSDateTimeFormat
DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
DECL_ACCESSORS(icu_date_interval_format, Managed<icu::DateIntervalFormat>)
- DECL_BOOLEAN_ACCESSORS(iso8601)
+ DECL_BOOLEAN_ACCESSORS(alt_calendar)
DECL_PRINTER(JSDateTimeFormat)
diff --git a/chromium/v8/src/objects/js-date-time-format.tq b/chromium/v8/src/objects/js-date-time-format.tq
index fedd761cdf0..ef0584e7901 100644
--- a/chromium/v8/src/objects/js-date-time-format.tq
+++ b/chromium/v8/src/objects/js-date-time-format.tq
@@ -10,7 +10,14 @@ bitfield struct JSDateTimeFormatFlags extends uint31 {
hour_cycle: HourCycle: 3 bit;
date_style: DateTimeStyle: 3 bit;
time_style: DateTimeStyle: 3 bit;
- iso8601: bool: 1bit;
+ // ICU reports the same type "gregorian" for both the "gregorian" and
+ // "iso8601" calendars, and the same type "islamic" for both the "islamic"
+ // and "islamic-rgsa" calendars. We use the alt_calendar bit to distinguish
+ // between them. When the type is "gregorian" and the alt_calendar bit is set,
+ // the calendar is "iso8601"; otherwise it is the true "gregorian" calendar.
+ // When the type is "islamic" and the alt_calendar bit is set, the calendar is
+ // "islamic-rgsa"; otherwise it is "islamic".
+ alt_calendar: bool: 1bit;
}
extern class JSDateTimeFormat extends JSObject {
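
Combined with the ResolvedOptions change earlier in this file, the alt_calendar bit yields a small mapping from ICU's reported calendar type to the resolved calendar name. A summary sketch of the behaviour visible in this patch (the helper name is hypothetical):

    #include <string>

    // How the ICU calendar type plus the alt_calendar bit resolve to the
    // calendar reported by resolvedOptions(), per the hunks above.
    std::string ResolveCalendar(const std::string& icu_type, bool alt_calendar) {
      if (icu_type == "gregorian") return alt_calendar ? "iso8601" : "gregory";
      if (icu_type == "ethiopic-amete-alem") return "ethioaa";
      if (icu_type == "islamic" && alt_calendar) return "islamic-rgsa";
      return icu_type;  // other calendar types pass through unchanged
    }
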
diff --git a/chromium/v8/src/objects/js-display-names.cc b/chromium/v8/src/objects/js-display-names.cc
index d4f05ad7390..d2e1064967b 100644
--- a/chromium/v8/src/objects/js-display-names.cc
+++ b/chromium/v8/src/objects/js-display-names.cc
@@ -15,8 +15,9 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-display-names-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
#include "unicode/localebuilder.h"
@@ -118,8 +119,11 @@ class LanguageNames : public LocaleDisplayNamesCommon {
LanguageNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~LanguageNames() override = default;
+
const char* type() const override { return "language"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
UErrorCode status = U_ZERO_ERROR;
@@ -152,8 +156,11 @@ class RegionNames : public LocaleDisplayNamesCommon {
RegionNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~RegionNames() override = default;
+
const char* type() const override { return "region"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -174,8 +181,11 @@ class ScriptNames : public LocaleDisplayNamesCommon {
ScriptNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~ScriptNames() override = default;
+
const char* type() const override { return "script"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -194,30 +204,47 @@ class ScriptNames : public LocaleDisplayNamesCommon {
class KeyValueDisplayNames : public LocaleDisplayNamesCommon {
public:
KeyValueDisplayNames(const icu::Locale& locale, JSDisplayNames::Style style,
- bool fallback, bool dialect, const char* key)
- : LocaleDisplayNamesCommon(locale, style, fallback, dialect), key_(key) {}
+ bool fallback, bool dialect, const char* key,
+ bool prevent_fallback)
+ : LocaleDisplayNamesCommon(locale, style, fallback, dialect),
+ key_(key),
+ prevent_fallback_(prevent_fallback) {}
+
~KeyValueDisplayNames() override = default;
+
const char* type() const override { return key_.c_str(); }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
icu::UnicodeString result;
locale_display_names()->keyValueDisplayName(key_.c_str(), code_str.c_str(),
result);
+ // Work around the issue that keyValueDisplayName ignores the
+ // no-substitution request and always falls back.
+ if (prevent_fallback_ && (result.length() == 3) &&
+ (code_str.length() == 3) &&
+ (result == icu::UnicodeString(code_str.c_str(), -1, US_INV))) {
+ result.setToBogus();
+ }
return Just(result);
}
private:
std::string key_;
+ bool prevent_fallback_;
};
class CurrencyNames : public KeyValueDisplayNames {
public:
CurrencyNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
- : KeyValueDisplayNames(locale, style, fallback, dialect, "currency") {}
+ : KeyValueDisplayNames(locale, style, fallback, dialect, "currency",
+ fallback == false) {}
+
~CurrencyNames() override = default;
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -234,8 +261,11 @@ class CalendarNames : public KeyValueDisplayNames {
public:
CalendarNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
- : KeyValueDisplayNames(locale, style, fallback, dialect, "calendar") {}
+ : KeyValueDisplayNames(locale, style, fallback, dialect, "calendar",
+ false) {}
+
~CalendarNames() override = default;
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -312,9 +342,13 @@ class DateTimeFieldNames : public DisplayNamesInternal {
icu::DateTimePatternGenerator::createInstance(locale_, status));
DCHECK(U_SUCCESS(status));
}
+
~DateTimeFieldNames() override = default;
+
const char* type() const override { return "dateTimeField"; }
+
icu::Locale locale() const override { return locale_; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
UDateTimePatternField field = StringToUDateTimePatternField(code);
@@ -372,9 +406,9 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
maybe_requested_locales.FromJust();
// 4. Let options be ? GetOptionsObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSDisplayNames);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSDisplayNames);
// Note: No need to create a record. It's not observable.
// 5. Let opt be a new Record.
@@ -409,7 +443,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 10. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::kLong, Style::kShort, Style::kNarrow}, Style::kLong);
MAYBE_RETURN(maybe_style, MaybeHandle<JSDisplayNames>());
@@ -422,23 +456,22 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// undefined).
Maybe<Type> maybe_type =
FLAG_harmony_intl_displaynames_v2
- ? Intl::GetStringOption<Type>(
+ ? GetStringOption<Type>(
isolate, options, "type", service,
{"language", "region", "script", "currency", "calendar",
"dateTimeField"},
{Type::kLanguage, Type::kRegion, Type::kScript, Type::kCurrency,
Type::kCalendar, Type::kDateTimeField},
Type::kUndefined)
- : Intl::GetStringOption<Type>(
- isolate, options, "type", service,
- {"language", "region", "script", "currency"},
- {
- Type::kLanguage,
- Type::kRegion,
- Type::kScript,
- Type::kCurrency,
- },
- Type::kUndefined);
+ : GetStringOption<Type>(isolate, options, "type", service,
+ {"language", "region", "script", "currency"},
+ {
+ Type::kLanguage,
+ Type::kRegion,
+ Type::kScript,
+ Type::kCurrency,
+ },
+ Type::kUndefined);
MAYBE_RETURN(maybe_type, MaybeHandle<JSDisplayNames>());
Type type_enum = maybe_type.FromJust();
@@ -452,7 +485,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 15. Let fallback be ? GetOption(options, "fallback", "string",
// « "code", "none" », "code").
- Maybe<Fallback> maybe_fallback = Intl::GetStringOption<Fallback>(
+ Maybe<Fallback> maybe_fallback = GetStringOption<Fallback>(
isolate, options, "fallback", service, {"code", "none"},
{Fallback::kCode, Fallback::kNone}, Fallback::kCode);
MAYBE_RETURN(maybe_fallback, MaybeHandle<JSDisplayNames>());
@@ -465,7 +498,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 24. Let languageDisplay be ? GetOption(options, "languageDisplay",
// "string", « "dialect", "standard" », "dialect").
Maybe<LanguageDisplay> maybe_language_display =
- Intl::GetStringOption<LanguageDisplay>(
+ GetStringOption<LanguageDisplay>(
isolate, options, "languageDisplay", service,
{"dialect", "standard"},
{LanguageDisplay::kDialect, LanguageDisplay::kStandard},
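
The CurrencyNames constructor above passes prevent_fallback == true exactly when fallback was requested as "none", and KeyValueDisplayNames::of then discards an ICU result that merely echoes the three-letter input code. A rough sketch of that check, with hypothetical names:

    #include <string>

    // Sketch of the workaround above: if ICU ignores the no-substitution
    // request and simply returns the input currency code, treat the lookup as
    // having no display name (the real code marks the UnicodeString as bogus).
    bool LooksLikeFallbackEcho(const std::string& code, const std::string& result,
                               bool prevent_fallback) {
      return prevent_fallback && code.size() == 3 && result == code;
    }
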
diff --git a/chromium/v8/src/objects/js-function-inl.h b/chromium/v8/src/objects/js-function-inl.h
index 275ffba14d7..15634b8f024 100644
--- a/chromium/v8/src/objects/js-function-inl.h
+++ b/chromium/v8/src/objects/js-function-inl.h
@@ -27,9 +27,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
-OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
-
-CAST_ACCESSOR(JSFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunction)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
RELEASE_ACQUIRE_ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell,
@@ -55,7 +53,7 @@ void JSFunction::ClearOptimizationMarker() {
}
bool JSFunction::ChecksOptimizationMarker() {
- return code(kAcquireLoad).checks_optimization_marker();
+ return code().checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
@@ -218,12 +216,6 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value.IsUndefined() || value.IsContext());
- WRITE_FIELD(*this, kContextOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kContextOffset, value, mode);
-}
-
RELEASE_ACQUIRE_ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map,
HeapObject, kPrototypeOrInitialMapOffset,
map().has_prototype_slot())
@@ -332,7 +324,7 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
}
bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
- return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+ return code().kind() == CodeKind::BASELINE && !shared().HasBaselineCode();
}
void JSFunction::ResetIfCodeFlushed(
diff --git a/chromium/v8/src/objects/js-function.cc b/chromium/v8/src/objects/js-function.cc
index b2d086814f8..105f6388af5 100644
--- a/chromium/v8/src/objects/js-function.cc
+++ b/chromium/v8/src/objects/js-function.cc
@@ -19,19 +19,10 @@ namespace v8 {
namespace internal {
CodeKinds JSFunction::GetAttachedCodeKinds() const {
- // Note: There's a special case when bytecode has been aged away. After
- // flushing the bytecode, the JSFunction will still have the interpreter
- // entry trampoline attached, but the bytecode is no longer available.
- Code code = this->code(kAcquireLoad);
- if (code.is_interpreter_trampoline_builtin()) {
- return CodeKindFlag::INTERPRETED_FUNCTION;
- }
-
- const CodeKind kind = code.kind();
+ const CodeKind kind = code().kind();
if (!CodeKindIsJSFunction(kind)) return {};
-
- if (CodeKindIsOptimizedJSFunction(kind) && code.marked_for_deoptimization()) {
- // Nothing is attached.
+ if (CodeKindIsOptimizedJSFunction(kind) &&
+ code().marked_for_deoptimization()) {
return {};
}
return CodeKindToCodeKindFlag(kind);
@@ -49,7 +40,7 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
if ((result & CodeKindFlag::BASELINE) == 0) {
// The SharedFunctionInfo could have attached baseline code.
- if (shared().HasBaselineData()) {
+ if (shared().HasBaselineCode()) {
result |= CodeKindFlag::BASELINE;
}
}
@@ -90,7 +81,8 @@ namespace {
// Returns false if no highest tier exists (i.e. the function is not compiled),
// otherwise returns true and sets highest_tier.
-bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
+V8_WARN_UNUSED_RESULT bool HighestTierOf(CodeKinds kinds,
+ CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
*highest_tier = CodeKind::TURBOFAN;
@@ -111,33 +103,43 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} // namespace
-bool JSFunction::ActiveTierIsIgnition() const {
- if (!shared().HasBytecodeArray()) return false;
- bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
+base::Optional<CodeKind> JSFunction::GetActiveTier() const {
+#if V8_ENABLE_WEBASSEMBLY
+ // Asm/Wasm functions are currently not supported. For simplicity, this
+ // includes invalid asm.js functions whose code hasn't yet been updated to
+ // CompileLazy but is still the InstantiateAsmJs builtin.
+ if (shared().HasAsmWasmData() ||
+ code().builtin_id() == Builtin::kInstantiateAsmJs) {
+ return {};
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CodeKind highest_tier;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return {};
+
#ifdef DEBUG
- Code code = this->code(kAcquireLoad);
- DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
- (CodeKindIsOptimizedJSFunction(code.kind()) &&
- code.marked_for_deoptimization()) ||
- (code.builtin_id() == Builtin::kCompileLazy &&
- shared().IsInterpreted()));
+ CHECK(highest_tier == CodeKind::TURBOFAN ||
+ highest_tier == CodeKind::BASELINE ||
+ highest_tier == CodeKind::TURBOPROP ||
+ highest_tier == CodeKind::INTERPRETED_FUNCTION);
+
+ if (highest_tier == CodeKind::INTERPRETED_FUNCTION) {
+ CHECK(code().is_interpreter_trampoline_builtin() ||
+ (CodeKindIsOptimizedJSFunction(code().kind()) &&
+ code().marked_for_deoptimization()) ||
+ (code().builtin_id() == Builtin::kCompileLazy &&
+ shared().IsInterpreted()));
+ }
#endif // DEBUG
- return result;
-}
-CodeKind JSFunction::GetActiveTier() const {
- CodeKind highest_tier;
- DCHECK(shared().is_compiled());
- HighestTierOf(GetAvailableCodeKinds(), &highest_tier);
- DCHECK(highest_tier == CodeKind::TURBOFAN ||
- highest_tier == CodeKind::BASELINE ||
- highest_tier == CodeKind::TURBOPROP ||
- highest_tier == CodeKind::INTERPRETED_FUNCTION);
return highest_tier;
}
+bool JSFunction::ActiveTierIsIgnition() const {
+ return GetActiveTier() == CodeKind::INTERPRETED_FUNCTION;
+}
+
bool JSFunction::ActiveTierIsTurbofan() const {
- if (!shared().HasBytecodeArray()) return false;
return GetActiveTier() == CodeKind::TURBOFAN;
}
@@ -145,27 +147,20 @@ bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
-bool JSFunction::ActiveTierIsIgnitionOrBaseline() const {
- return ActiveTierIsIgnition() || ActiveTierIsBaseline();
-}
-
bool JSFunction::ActiveTierIsToptierTurboprop() const {
- if (!FLAG_turboprop_as_toptier) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && FLAG_turboprop_as_toptier;
+ return FLAG_turboprop_as_toptier && GetActiveTier() == CodeKind::TURBOPROP;
}
bool JSFunction::ActiveTierIsMidtierTurboprop() const {
- if (!FLAG_turboprop) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && !FLAG_turboprop_as_toptier;
+ return FLAG_turboprop && !FLAG_turboprop_as_toptier &&
+ GetActiveTier() == CodeKind::TURBOPROP;
}
CodeKind JSFunction::NextTier() const {
if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) {
- DCHECK(ActiveTierIsIgnitionOrBaseline());
+ DCHECK(ActiveTierIsIgnition() || ActiveTierIsBaseline());
return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
@@ -554,6 +549,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_DATE_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
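
GetActiveTier now reports "no active tier" as an empty base::Optional instead of asserting, with HighestTierOf picking the best available code kind. A rough sketch of that shape; the ordering past TURBOFAN is an assumption inferred from the DEBUG check in the hunk above, and std::optional stands in for base::Optional:

    #include <optional>

    enum class CodeKind { INTERPRETED_FUNCTION, BASELINE, TURBOPROP, TURBOFAN };
    using CodeKinds = unsigned;  // bit set of code-kind flags (sketch only)

    constexpr CodeKinds Flag(CodeKind k) { return 1u << static_cast<unsigned>(k); }

    // Pick the highest available tier, or report "not compiled" as empty.
    std::optional<CodeKind> ActiveTierOf(CodeKinds available) {
      for (CodeKind k : {CodeKind::TURBOFAN, CodeKind::TURBOPROP,
                         CodeKind::BASELINE, CodeKind::INTERPRETED_FUNCTION}) {
        if (available & Flag(k)) return k;
      }
      return std::nullopt;
    }
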
diff --git a/chromium/v8/src/objects/js-function.h b/chromium/v8/src/objects/js-function.h
index 6d7b21abe95..85a1236e41b 100644
--- a/chromium/v8/src/objects/js-function.h
+++ b/chromium/v8/src/objects/js-function.h
@@ -7,7 +7,6 @@
#include "src/objects/code-kind.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -53,7 +52,8 @@ class JSBoundFunction
};
// JSFunction describes JavaScript functions.
-class JSFunction : public JSFunctionOrBoundFunction {
+class JSFunction
+ : public TorqueGeneratedJSFunction<JSFunction, JSFunctionOrBoundFunction> {
public:
// [prototype_or_initial_map]:
DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
@@ -70,8 +70,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
inline Context context();
DECL_RELAXED_GETTER(context, Context)
inline bool has_context() const;
- inline void set_context(HeapObject context,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -106,7 +104,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// indirect means such as the feedback vector's optimized code cache.
// - Active: the single code kind that would be executed if this function
// were called in its current state. Note that there may not be an active
- // code kind if the function is not compiled.
+ // code kind if the function is not compiled. Also, asm/wasm functions are
+ // currently not supported.
//
// Note: code objects that are marked_for_deoptimization are not part of the
// attached/available/active sets. This is because the JSFunction might have
@@ -120,11 +119,10 @@ class JSFunction : public JSFunctionOrBoundFunction {
bool HasAttachedCodeKind(CodeKind kind) const;
bool HasAvailableCodeKind(CodeKind kind) const;
- CodeKind GetActiveTier() const;
+ base::Optional<CodeKind> GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsBaseline() const;
- bool ActiveTierIsIgnitionOrBaseline() const;
bool ActiveTierIsMidtierTurboprop() const;
bool ActiveTierIsToptierTurboprop() const;
@@ -275,8 +273,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
- DECL_CAST(JSFunction)
-
// Calculate the instance size and in-object properties count.
// {CalculateExpectedNofProperties} can trigger compilation.
static V8_WARN_UNUSED_RESULT int CalculateExpectedNofProperties(
@@ -310,18 +306,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- struct FieldOffsets {
- DEFINE_FIELD_OFFSET_CONSTANTS(JSFunctionOrBoundFunction::kHeaderSize,
- TORQUE_GENERATED_JS_FUNCTION_FIELDS)
- };
- static constexpr int kSharedFunctionInfoOffset =
- FieldOffsets::kSharedFunctionInfoOffset;
- static constexpr int kContextOffset = FieldOffsets::kContextOffset;
- static constexpr int kFeedbackCellOffset = FieldOffsets::kFeedbackCellOffset;
- static constexpr int kCodeOffset = FieldOffsets::kCodeOffset;
- static constexpr int kPrototypeOrInitialMapOffset =
- FieldOffsets::kPrototypeOrInitialMapOffset;
-
class BodyDescriptor;
private:
@@ -329,9 +313,15 @@ class JSFunction : public JSFunctionOrBoundFunction {
DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
// JSFunction doesn't have a fixed header size:
- // Hide JSFunctionOrBoundFunction::kHeaderSize to avoid confusion.
+ // Hide TorqueGeneratedClass::kHeaderSize to avoid confusion.
static const int kHeaderSize;
+ // Hide generated accessors; custom accessors are called "shared".
+ DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
+
+ // Hide generated accessors; custom accessors are called "raw_feedback_cell".
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
+
// Returns the set of code kinds of compilation artifacts (bytecode,
// generated code) attached to this JSFunction.
// Note that attached code objects that are marked_for_deoptimization are not
@@ -348,9 +338,9 @@ class JSFunction : public JSFunctionOrBoundFunction {
public:
static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
- static constexpr int kSizeWithPrototype = FieldOffsets::kHeaderSize;
+ static constexpr int kSizeWithPrototype = TorqueGeneratedClass::kHeaderSize;
- OBJECT_CONSTRUCTORS(JSFunction, JSFunctionOrBoundFunction);
+ TQ_OBJECT_CONSTRUCTORS(JSFunction)
};
} // namespace internal
diff --git a/chromium/v8/src/objects/js-function.tq b/chromium/v8/src/objects/js-function.tq
index de934b82f47..59dd2d5dc25 100644
--- a/chromium/v8/src/objects/js-function.tq
+++ b/chromium/v8/src/objects/js-function.tq
@@ -17,8 +17,9 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
bound_arguments: FixedArray;
}
+// This class does not use the generated verifier, so if you change anything
+// here, please also update JSFunctionVerify in objects-debug.cc.
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
@@ -26,7 +27,15 @@ extern class JSFunction extends JSFunctionOrBoundFunction {
@if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
// Space for the following field may or may not be allocated.
- @noVerifier prototype_or_initial_map: JSReceiver|Map;
+ prototype_or_initial_map: JSReceiver|Map;
}
+// Class constructors are special, because they are callable, but [[Call]] will
+// raise an exception.
+// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+@doNotGenerateCast
+@highestInstanceTypeWithinParentClassRange
+extern class JSClassConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
+
type JSFunctionWithPrototypeSlot extends JSFunction;
diff --git a/chromium/v8/src/objects/js-list-format.cc b/chromium/v8/src/objects/js-list-format.cc
index 9ff9c82d123..6830d4f992c 100644
--- a/chromium/v8/src/objects/js-list-format.cc
+++ b/chromium/v8/src/objects/js-list-format.cc
@@ -18,8 +18,9 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-list-format-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/fieldpos.h"
#include "unicode/fpositer.h"
#include "unicode/listformatter.h"
@@ -69,9 +70,9 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.ListFormat";
// 4. Let options be GetOptionsObject(_options_).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSListFormat);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSListFormat);
// Note: No need to create a record. It's not observable.
// 6. Let opt be a new Record.
@@ -100,7 +101,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service, {"conjunction", "disjunction", "unit"},
{Type::CONJUNCTION, Type::DISJUNCTION, Type::UNIT}, Type::CONJUNCTION);
MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
@@ -108,7 +109,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 14. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
@@ -220,7 +221,8 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&)>&
+ formatToResult) {
DCHECK(!list->IsUndefined());
Maybe<std::vector<icu::UnicodeString>> maybe_array =
ToUnicodeStringArray(isolate, list);
diff --git a/chromium/v8/src/objects/js-locale.cc b/chromium/v8/src/objects/js-locale.cc
index 64644abad23..05f4a7302d7 100644
--- a/chromium/v8/src/objects/js-locale.cc
+++ b/chromium/v8/src/objects/js-locale.cc
@@ -15,11 +15,12 @@
#include "src/api/api.h"
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/calendar.h"
#include "unicode/char16ptr.h"
#include "unicode/coll.h"
@@ -70,11 +71,11 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
bool value_bool = false;
Maybe<bool> maybe_found =
option_to_bcp47.is_bool_value
- ? Intl::GetBoolOption(isolate, options, option_to_bcp47.name,
- "locale", &value_bool)
- : Intl::GetStringOption(isolate, options, option_to_bcp47.name,
- *(option_to_bcp47.possible_values),
- "locale", &value_str);
+ ? GetBoolOption(isolate, options, option_to_bcp47.name, "locale",
+ &value_bool)
+ : GetStringOption(isolate, options, option_to_bcp47.name,
+ *(option_to_bcp47.possible_values), "locale",
+ &value_str);
MAYBE_RETURN(maybe_found, Nothing<bool>());
// TODO(cira): Use fallback value if value is not found to make
@@ -177,19 +178,18 @@ int32_t weekdayFromEDaysOfWeek(icu::Calendar::EDaysOfWeek eDaysOfWeek) {
} // namespace
-bool JSLocale::Is38AlphaNumList(const std::string& value) {
- std::size_t found_dash = value.find("-");
- std::size_t found_underscore = value.find("_");
- if (found_dash == std::string::npos &&
- found_underscore == std::string::npos) {
- return IsAlphanum(value, 3, 8);
- }
- if (found_underscore == std::string::npos || found_dash < found_underscore) {
- return IsAlphanum(value.substr(0, found_dash), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_dash + 1));
+// Implemented as iteration instead of recursion to avoid stack overflow for
+// very long input strings.
+bool JSLocale::Is38AlphaNumList(const std::string& in) {
+ std::string value = in;
+ while (true) {
+ std::size_t found_dash = value.find("-");
+ if (found_dash == std::string::npos) {
+ return IsAlphanum(value, 3, 8);
+ }
+ if (!IsAlphanum(value.substr(0, found_dash), 3, 8)) return false;
+ value = value.substr(found_dash + 1);
}
- return IsAlphanum(value.substr(0, found_underscore), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_underscore + 1));
}
bool JSLocale::Is3Alpha(const std::string& value) {
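
The rewritten Is38AlphaNumList above walks the "-"-separated segments in a loop rather than recursing, so a very long keyword list can no longer overflow the stack. A self-contained sketch of the same check, assuming a simple IsAlphanum helper (the real one lives elsewhere in this file):

    #include <cctype>
    #include <cstddef>
    #include <string>

    // Hypothetical stand-in for the IsAlphanum(value, min, max) helper.
    bool IsAlphanum(const std::string& s, std::size_t min_len, std::size_t max_len) {
      if (s.size() < min_len || s.size() > max_len) return false;
      for (unsigned char c : s) {
        if (!std::isalnum(c)) return false;
      }
      return true;
    }

    // Iterative check that every "-"-separated segment is 3-8 alphanumerics.
    bool Is38AlphaNumList(std::string value) {
      while (true) {
        const std::size_t dash = value.find('-');
        if (dash == std::string::npos) return IsAlphanum(value, 3, 8);
        if (!IsAlphanum(value.substr(0, dash), 3, 8)) return false;
        value = value.substr(dash + 1);
      }
    }
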
@@ -267,8 +267,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
const std::vector<const char*> empty_values = {};
std::unique_ptr<char[]> language_str = nullptr;
Maybe<bool> maybe_language =
- Intl::GetStringOption(isolate, options, "language", empty_values,
- "ApplyOptionsToTag", &language_str);
+ GetStringOption(isolate, options, "language", empty_values,
+ "ApplyOptionsToTag", &language_str);
MAYBE_RETURN(maybe_language, Nothing<bool>());
// 4. If language is not undefined, then
if (maybe_language.FromJust()) {
@@ -285,8 +285,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
// undefined).
std::unique_ptr<char[]> script_str = nullptr;
Maybe<bool> maybe_script =
- Intl::GetStringOption(isolate, options, "script", empty_values,
- "ApplyOptionsToTag", &script_str);
+ GetStringOption(isolate, options, "script", empty_values,
+ "ApplyOptionsToTag", &script_str);
MAYBE_RETURN(maybe_script, Nothing<bool>());
// 6. If script is not undefined, then
if (maybe_script.FromJust()) {
@@ -302,8 +302,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
// undefined).
std::unique_ptr<char[]> region_str = nullptr;
Maybe<bool> maybe_region =
- Intl::GetStringOption(isolate, options, "region", empty_values,
- "ApplyOptionsToTag", &region_str);
+ GetStringOption(isolate, options, "region", empty_values,
+ "ApplyOptionsToTag", &region_str);
MAYBE_RETURN(maybe_region, Nothing<bool>());
// 8. If region is not undefined, then
if (maybe_region.FromJust()) {
@@ -476,57 +476,13 @@ MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
return Construct(isolate, result);
}
-MaybeHandle<JSArray> ToJSArray(Isolate* isolate, const char* unicode_key,
- icu::StringEnumeration* enumeration,
- const std::set<std::string>& removes) {
- UErrorCode status = U_ZERO_ERROR;
- Factory* factory = isolate->factory();
-
- int32_t count = 0;
- if (!removes.empty()) {
- // If we may remove items, then we need to go one pass first to count how
- // many items we will insert before we allocate the fixed array.
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) == removes.end()) {
- count++;
- }
- }
- enumeration->reset(status);
- } else {
- count = enumeration->count(status);
- }
- Handle<FixedArray> fixed_array = factory->NewFixedArray(count);
-
- int32_t index = 0;
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) != removes.end()) {
- continue;
- }
- Handle<String> str = factory->NewStringFromAsciiChecked(item);
- fixed_array->set(index++, *str);
- }
- CHECK(index == count);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
- JSArray);
- }
- return factory->NewJSArrayWithElements(fixed_array);
-}
-
template <typename T>
-MaybeHandle<JSArray> GetKeywordValuesFromLocale(
- Isolate* isolate, const char* key, const char* unicode_key,
- const icu::Locale& locale, const std::set<std::string>& removes) {
+MaybeHandle<JSArray> GetKeywordValuesFromLocale(Isolate* isolate,
+ const char* key,
+ const char* unicode_key,
+ const icu::Locale& locale,
+ bool (*removes)(const char*),
+ bool commonly_used, bool sort) {
Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
std::string ext =
@@ -539,27 +495,43 @@ MaybeHandle<JSArray> GetKeywordValuesFromLocale(
}
status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> enumeration(
- T::getKeywordValuesForLocale(key, locale, true, status));
+ T::getKeywordValuesForLocale(key, locale, commonly_used, status));
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, unicode_key, enumeration.get(), removes);
+ return Intl::ToJSArray(isolate, unicode_key, enumeration.get(), removes,
+ sort);
+}
+
+namespace {
+
+MaybeHandle<JSArray> CalendarsForLocale(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ bool commonly_used, bool sort) {
+ return GetKeywordValuesFromLocale<icu::Calendar>(
+ isolate, "calendar", "ca", icu_locale, nullptr, commonly_used, sort);
}
+} // namespace
+
MaybeHandle<JSArray> JSLocale::Calendars(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- return GetKeywordValuesFromLocale<icu::Calendar>(
- isolate, "calendar", "ca", icu_locale, std::set<std::string>());
+ return CalendarsForLocale(isolate, icu_locale, true, false);
+}
+
+MaybeHandle<JSArray> Intl::AvailableCalendars(Isolate* isolate) {
+ icu::Locale icu_locale("und");
+ return CalendarsForLocale(isolate, icu_locale, false, true);
}
MaybeHandle<JSArray> JSLocale::Collations(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- const std::set<std::string> removes({"standard", "search"});
- return GetKeywordValuesFromLocale<icu::Collator>(isolate, "collations", "co",
- icu_locale, removes);
+ return GetKeywordValuesFromLocale<icu::Collator>(
+ isolate, "collations", "co", icu_locale, Intl::RemoveCollation, true,
+ false);
}
MaybeHandle<JSArray> JSLocale::HourCycles(Isolate* isolate,
@@ -688,8 +660,7 @@ MaybeHandle<Object> JSLocale::TimeZones(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, nullptr, enumeration.get(),
- std::set<std::string>());
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
}
MaybeHandle<JSObject> JSLocale::TextInfo(Isolate* isolate,
diff --git a/chromium/v8/src/objects/js-number-format.cc b/chromium/v8/src/objects/js-number-format.cc
index cc5b77a005f..cc337a0df2c 100644
--- a/chromium/v8/src/objects/js-number-format.cc
+++ b/chromium/v8/src/objects/js-number-format.cc
@@ -14,7 +14,9 @@
#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/currunit.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
@@ -173,27 +175,11 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
CHECK(U_FAILURE(status));
status = U_ZERO_ERROR;
- // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
- std::set<std::string> sanctioned(
- {"acre", "bit", "byte",
- "celsius", "centimeter", "day",
- "degree", "fahrenheit", "fluid-ounce",
- "foot", "gallon", "gigabit",
- "gigabyte", "gram", "hectare",
- "hour", "inch", "kilobit",
- "kilobyte", "kilogram", "kilometer",
- "liter", "megabit", "megabyte",
- "meter", "mile", "mile-scandinavian",
- "millimeter", "milliliter", "millisecond",
- "minute", "month", "ounce",
- "percent", "petabyte", "pound",
- "second", "stone", "terabit",
- "terabyte", "week", "yard",
- "year"});
std::vector<icu::MeasureUnit> units(total);
total = icu::MeasureUnit::getAvailable(units.data(), total, status);
CHECK(U_SUCCESS(status));
std::map<const std::string, icu::MeasureUnit> map;
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
for (auto it = units.begin(); it != units.end(); ++it) {
// Need to skip none/percent
if (sanctioned.count(it->getSubtype()) > 0 &&
@@ -832,8 +818,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 2. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
JSNumberFormat);
// 4. Let opt be a new Record.
@@ -915,7 +900,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 3. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency", "unit" », "decimal").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service,
{"decimal", "percent", "currency", "unit"},
{Style::DECIMAL, Style::PERCENT, Style::CURRENCY, Style::UNIT},
@@ -929,7 +914,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// undefined).
std::unique_ptr<char[]> currency_cstr;
const std::vector<const char*> empty_values = {};
- Maybe<bool> found_currency = Intl::GetStringOption(
+ Maybe<bool> found_currency = GetStringOption(
isolate, options, "currency", empty_values, service, &currency_cstr);
MAYBE_RETURN(found_currency, MaybeHandle<JSNumberFormat>());
@@ -959,7 +944,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 8. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name", "narrowSymbol" », "symbol").
Maybe<CurrencyDisplay> maybe_currency_display =
- Intl::GetStringOption<CurrencyDisplay>(
+ GetStringOption<CurrencyDisplay>(
isolate, options, "currencyDisplay", service,
{"code", "symbol", "name", "narrowSymbol"},
{CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
@@ -971,7 +956,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
CurrencySign currency_sign = CurrencySign::STANDARD;
// 9. Let currencySign be ? GetOption(options, "currencySign", "string", «
// "standard", "accounting" », "standard").
- Maybe<CurrencySign> maybe_currency_sign = Intl::GetStringOption<CurrencySign>(
+ Maybe<CurrencySign> maybe_currency_sign = GetStringOption<CurrencySign>(
isolate, options, "currencySign", service, {"standard", "accounting"},
{CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
CurrencySign::STANDARD);
@@ -981,8 +966,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 10. Let unit be ? GetOption(options, "unit", "string", undefined,
// undefined).
std::unique_ptr<char[]> unit_cstr;
- Maybe<bool> found_unit = Intl::GetStringOption(
- isolate, options, "unit", empty_values, service, &unit_cstr);
+ Maybe<bool> found_unit = GetStringOption(isolate, options, "unit",
+ empty_values, service, &unit_cstr);
MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair;
@@ -1017,7 +1002,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 13. Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
// "short", "narrow", "long" », "short").
- Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
+ Maybe<UnitDisplay> maybe_unit_display = GetStringOption<UnitDisplay>(
isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
{UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
UnitDisplay::SHORT);
@@ -1113,7 +1098,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Notation notation = Notation::STANDARD;
// 25. Let notation be ? GetOption(options, "notation", "string", «
// "standard", "scientific", "engineering", "compact" », "standard").
- Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
+ Maybe<Notation> maybe_notation = GetStringOption<Notation>(
isolate, options, "notation", service,
{"standard", "scientific", "engineering", "compact"},
{Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
@@ -1135,10 +1120,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
// "string", « "short", "long" », "short").
- Maybe<CompactDisplay> maybe_compact_display =
- Intl::GetStringOption<CompactDisplay>(
- isolate, options, "compactDisplay", service, {"short", "long"},
- {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT);
+ Maybe<CompactDisplay> maybe_compact_display = GetStringOption<CompactDisplay>(
+ isolate, options, "compactDisplay", service, {"short", "long"},
+ {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT);
MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
CompactDisplay compact_display = maybe_compact_display.FromJust();
@@ -1152,8 +1136,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
- Maybe<bool> found_use_grouping = Intl::GetBoolOption(
- isolate, options, "useGrouping", service, &use_grouping);
+ Maybe<bool> found_use_grouping =
+ GetBoolOption(isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
// 31. Set numberFormat.[[UseGrouping]] to useGrouping.
if (!use_grouping) {
@@ -1163,7 +1147,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
// "auto", "never", "always", "exceptZero" », "auto").
- Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
+ Maybe<SignDisplay> maybe_sign_display = GetStringOption<SignDisplay>(
isolate, options, "signDisplay", service,
{"auto", "never", "always", "exceptZero"},
{SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
diff --git a/chromium/v8/src/objects/js-objects-inl.h b/chromium/v8/src/objects/js-objects-inl.h
index 6be8267a553..dbe3f7f4010 100644
--- a/chromium/v8/src/objects/js-objects-inl.h
+++ b/chromium/v8/src/objects/js-objects-inl.h
@@ -31,25 +31,22 @@ namespace internal {
#include "torque-generated/src/objects/js-objects-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSReceiver)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSSpecialObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
-OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSMessageObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
-CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSIteratorResult)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSReceiver)
DEF_GETTER(JSObject, elements, FixedArrayBase) {
return TaggedField<FixedArrayBase, kElementsOffset>::load(cage_base, *this);
@@ -400,7 +397,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
DisallowGarbageCollection no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
@@ -472,9 +469,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
}
-ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
-ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
-
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
return TaggedField<Object, kNativeContextOffset>::load(cage_base, *this);
}
@@ -501,9 +495,6 @@ void JSMessageObject::set_type(MessageTemplate value) {
set_raw_type(static_cast<int>(value));
}
-ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
ACCESSORS(JSMessageObject, shared_info, HeapObject, kSharedInfoOffset)
ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
diff --git a/chromium/v8/src/objects/js-objects.cc b/chromium/v8/src/objects/js-objects.cc
index cdd16a65a6a..008036b11f2 100644
--- a/chromium/v8/src/objects/js-objects.cc
+++ b/chromium/v8/src/objects/js-objects.cc
@@ -260,7 +260,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(i), isolate);
} else {
Representation representation = details.representation();
@@ -1055,6 +1055,11 @@ Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
return JSTypedArray::DefineOwnProperty(
isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
}
+ if (object->IsJSModuleNamespace()) {
+ return JSModuleNamespace::DefineOwnProperty(
+ isolate, Handle<JSModuleNamespace>::cast(object), key, desc,
+ should_throw);
+ }
// OrdinaryDefineOwnProperty, by virtue of calling
// DefineOwnPropertyIgnoreAttributes, can handle arguments
@@ -1996,7 +2001,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(index), isolate);
} else {
Representation representation = details.representation();
@@ -2238,6 +2243,7 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kHeaderSize;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -2584,6 +2590,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_FUNCTION_TYPE: {
JSFunction function = JSFunction::cast(*this);
std::unique_ptr<char[]> fun_name = function.shared().DebugNameCStr();
@@ -2710,8 +2717,8 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
if (!o_r.Equals(n_r)) {
String::cast(o.GetKey(i)).PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o.GetDetails(i).location() == kDescriptor &&
- n.GetDetails(i).location() == kField) {
+ } else if (o.GetDetails(i).location() == PropertyLocation::kDescriptor &&
+ n.GetDetails(i).location() == PropertyLocation::kField) {
Name name = o.GetKey(i);
if (name.IsString()) {
String::cast(name).PrintOn(file);
@@ -2817,7 +2824,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// If the map adds a new kDescriptor property, simply set the map.
PropertyDetails details = new_map->GetLastDescriptorDetails(isolate);
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2852,7 +2859,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
} else {
value = isolate->factory()->uninitialized_value();
}
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
DCHECK(!index.is_inobject()); // Must be a backing store index.
new_storage->set(index.outobject_array_index(), *value);
@@ -2902,13 +2909,13 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
PropertyDetails old_details = old_descriptors->GetDetails(i);
Representation old_representation = old_details.representation();
Representation representation = details.representation();
Handle<Object> value;
- if (old_details.location() == kDescriptor) {
+ if (old_details.location() == PropertyLocation::kDescriptor) {
if (old_details.kind() == kAccessor) {
// In case of kAccessor -> kData property reconfiguration, the property
// must already be prepared for data of certain type.
@@ -2924,7 +2931,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
} else {
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
@@ -2946,7 +2953,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof, new_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
Handle<Object> value;
if (details.representation().IsDouble()) {
@@ -3035,7 +3042,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
if (details.kind() == kData) {
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
@@ -3050,7 +3057,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
value = handle(descs->GetStrongValue(isolate, i), isolate);
}
DCHECK(!value.is_null());
@@ -3592,7 +3599,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
new_map->set_may_have_interesting_symbols(true);
}
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
details.constness() == PropertyConstness::kMutable);
@@ -3617,7 +3624,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
details.attributes());
}
details = d.GetDetails();
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (current_offset < inobject_props) {
object->InObjectPropertyAtPut(current_offset, value,
UPDATE_WRITE_BARRIER);
@@ -4417,7 +4424,7 @@ Object JSObject::SlowReverseLookup(Object value) {
bool value_is_number = value.IsNumber();
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
Object property = RawFastPropertyAt(field_index);
@@ -4430,7 +4437,7 @@ Object JSObject::SlowReverseLookup(Object value) {
return descs.GetKey(i);
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
if (details.kind() == kData) {
if (descs.GetStrongValue(i) == value) {
return descs.GetKey(i);
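
Most of the js-objects.cc churn above (and the keys.cc hunks near the end of this section) is the same spelling change: the bare kField / kDescriptor enumerators become PropertyLocation::kField / PropertyLocation::kDescriptor. Below is a minimal sketch of why the qualifier becomes mandatory once the enumeration is scoped; whether V8 previously used an unscoped enum or loose constants is not visible in this diff, so the definition here is purely illustrative.

    #include <cassert>

    // Before (illustrative): unscoped enumerators leak into the enclosing
    // namespace, so call sites could write plain kField.
    //   enum PropertyLocation { kField, kDescriptor };

    // After: a scoped enum keeps its enumerators inside the type and no longer
    // converts implicitly to int, which is why every comparison and DCHECK in
    // the patch gains the PropertyLocation:: prefix.
    enum class PropertyLocation { kField, kDescriptor };

    int main() {
      PropertyLocation location = PropertyLocation::kField;
      // Plain `kField` would no longer compile here.
      assert(location == PropertyLocation::kField);
      return 0;
    }
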
diff --git a/chromium/v8/src/objects/js-objects.h b/chromium/v8/src/objects/js-objects.h
index 74522370065..c0d3405f26b 100644
--- a/chromium/v8/src/objects/js-objects.h
+++ b/chromium/v8/src/objects/js-objects.h
@@ -10,7 +10,6 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -34,7 +33,7 @@ class IsCompiledScope;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
-class JSReceiver : public HeapObject {
+class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
@@ -85,9 +84,6 @@ class JSReceiver : public HeapObject {
static void DeleteNormalizedProperty(Handle<JSReceiver> object,
InternalIndex entry);
- DECL_CAST(JSReceiver)
- DECL_VERIFIER(JSReceiver)
-
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
Handle<JSReceiver> receiver,
@@ -288,14 +284,17 @@ class JSReceiver : public HeapObject {
static const int kHashMask = PropertyArray::HashField::kMask;
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
// TC39 "Dynamic Code Brand Checks"
bool IsCodeLike(Isolate* isolate) const;
- OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
+ private:
+ // Hide generated accessors; custom accessors are called
+ // "raw_properties_or_hash".
+ DECL_ACCESSORS(properties_or_hash, Object)
+
+ TQ_OBJECT_CONSTRUCTORS(JSReceiver)
};
// The JSObject describes real heap allocated JavaScript objects with
@@ -996,21 +995,14 @@ class JSGlobalProxy
};
// JavaScript global object.
-class JSGlobalObject : public JSSpecialObject {
+class JSGlobalObject
+ : public TorqueGeneratedJSGlobalObject<JSGlobalObject, JSSpecialObject> {
public:
- // [native context]: the natives corresponding to this global object.
- DECL_ACCESSORS(native_context, NativeContext)
-
- // [global proxy]: the global proxy object of the context
- DECL_ACCESSORS(global_proxy, JSGlobalProxy)
-
DECL_RELEASE_ACQUIRE_ACCESSORS(global_dictionary, GlobalDictionary)
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
- DECL_CAST(JSGlobalObject)
-
inline bool IsDetached();
// May be called by the concurrent GC when the global object is not
@@ -1021,11 +1013,7 @@ class JSGlobalObject : public JSSpecialObject {
DECL_PRINTER(JSGlobalObject)
DECL_VERIFIER(JSGlobalObject)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSSpecialObject::kHeaderSize,
- TORQUE_GENERATED_JS_GLOBAL_OBJECT_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSGlobalObject, JSSpecialObject);
+ TQ_OBJECT_CONSTRUCTORS(JSGlobalObject)
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
@@ -1113,21 +1101,13 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
// error messages are not directly accessible from JavaScript to
// prevent leaking information to user code called during error
// formatting.
-class JSMessageObject : public JSObject {
+class JSMessageObject
+ : public TorqueGeneratedJSMessageObject<JSMessageObject, JSObject> {
public:
// [type]: the type of error message.
inline MessageTemplate type() const;
inline void set_type(MessageTemplate value);
- // [arguments]: the arguments for formatting the error message.
- DECL_ACCESSORS(argument, Object)
-
- // [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Script)
-
- // [stack_frames]: an array of stack frames for this error object.
- DECL_ACCESSORS(stack_frames, Object)
-
// Initializes the source positions in the object if possible. Does nothing if
// called more than once. If called when stack space is exhausted, then the
// source positions will be not be set and calling it again when there is more
// source positions will not be set and calling it again when there is more
@@ -1159,14 +1139,9 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(error_level)
- DECL_CAST(JSMessageObject)
-
// Dispatched behavior.
DECL_PRINTER(JSMessageObject)
- DECL_VERIFIER(JSMessageObject)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_MESSAGE_OBJECT_FIELDS)
// TODO(v8:8989): [torque] Support marker constants.
static const int kPointerFieldsEndOffset = kStartPositionOffset;
@@ -1195,7 +1170,10 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(raw_type)
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
+ // Hide generated accessors; custom accessors are named "raw_type".
+ DECL_INT_ACCESSORS(message_type)
+
+ TQ_OBJECT_CONSTRUCTORS(JSMessageObject)
};
// The [Async-from-Sync Iterator] object
diff --git a/chromium/v8/src/objects/js-objects.tq b/chromium/v8/src/objects/js-objects.tq
index fd48d430456..927bca18de4 100644
--- a/chromium/v8/src/objects/js-objects.tq
+++ b/chromium/v8/src/objects/js-objects.tq
@@ -5,7 +5,6 @@
// JSReceiver corresponds to objects in the JS sense.
@abstract
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSReceiver extends HeapObject {
properties_or_hash: SwissNameDictionary|FixedArrayBase|PropertyArray|Smi;
}
@@ -56,7 +55,7 @@ macro GetDerivedMap(implicit context: Context)(
try {
const constructor =
Cast<JSFunctionWithPrototypeSlot>(newTarget) otherwise SlowPath;
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
const map =
Cast<Map>(constructor.prototype_or_initial_map) otherwise SlowPath;
if (LoadConstructorOrBackPointer(map) != target) {
@@ -97,20 +96,24 @@ extern class JSGlobalProxy extends JSSpecialObject {
native_context: Object;
}
-@doNotGenerateCppClass
extern class JSGlobalObject extends JSSpecialObject {
+ // [native context]: the natives corresponding to this global object.
native_context: NativeContext;
+
+ // [global proxy]: the global proxy object of the context
global_proxy: JSGlobalProxy;
}
extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; }
-@doNotGenerateCppClass
extern class JSMessageObject extends JSObject {
// Tagged fields.
message_type: Smi;
- arguments: Object;
+ // [argument]: the arguments for formatting the error message.
+ argument: Object;
+ // [script]: the script from which the error message originated.
script: Script;
+ // [stack_frames]: an array of stack frames for this error object.
stack_frames: Object;
shared_info: SharedFunctionInfo|Undefined;
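
The recurring pattern across js-objects.tq, js-objects.h and js-objects-inl.h above: dropping @doNotGenerateCppClass from a Torque class definition lets the Torque compiler emit the field offsets, accessors and cast helpers, so the handwritten C++ class shrinks to a wrapper around the generated CRTP base. The fragments below are assembled from the JSGlobalObject hunks in this patch to show the three pieces side by side; this is a shape sketch, not a compilable excerpt, since the TorqueGenerated* base and the TQ_* macros come from build-time generated headers.

    // js-objects.tq: the class declared in Torque (no @doNotGenerateCppClass),
    // so torque emits TorqueGeneratedJSGlobalObject with offsets + accessors.
    //   extern class JSGlobalObject extends JSSpecialObject {
    //     native_context: NativeContext;
    //     global_proxy: JSGlobalProxy;
    //   }

    // js-objects.h: the handwritten class derives from the generated CRTP base
    // and keeps only its non-generated members.
    class JSGlobalObject
        : public TorqueGeneratedJSGlobalObject<JSGlobalObject, JSSpecialObject> {
     public:
      DECL_PRINTER(JSGlobalObject)
      DECL_VERIFIER(JSGlobalObject)
      TQ_OBJECT_CONSTRUCTORS(JSGlobalObject)
    };

    // js-objects-inl.h: the constructor boilerplate comes from one macro.
    TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject)
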
diff --git a/chromium/v8/src/objects/js-plural-rules.cc b/chromium/v8/src/objects/js-plural-rules.cc
index 9c2d77d6bc9..ec15bd17cd0 100644
--- a/chromium/v8/src/objects/js-plural-rules.cc
+++ b/chromium/v8/src/objects/js-plural-rules.cc
@@ -12,6 +12,8 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/locid.h"
#include "unicode/numberformatter.h"
#include "unicode/plurrule.h"
@@ -74,8 +76,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.PluralRules";
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
JSPluralRules);
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
@@ -88,7 +89,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
// 7. Let t be ? GetOption(options, "type", "string", « "cardinal",
// "ordinal" », "cardinal").
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service, {"cardinal", "ordinal"},
{Type::CARDINAL, Type::ORDINAL}, Type::CARDINAL);
MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
diff --git a/chromium/v8/src/objects/js-promise.h b/chromium/v8/src/objects/js-promise.h
index dda3afec99e..5afb66a0b27 100644
--- a/chromium/v8/src/objects/js-promise.h
+++ b/chromium/v8/src/objects/js-promise.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_PROMISE_H_
#define V8_OBJECTS_JS_PROMISE_H_
+#include "include/v8-promise.h"
#include "src/objects/js-objects.h"
#include "src/objects/promise.h"
#include "torque-generated/bit-fields.h"
diff --git a/chromium/v8/src/objects/js-promise.tq b/chromium/v8/src/objects/js-promise.tq
index be8fb066375..01426fd6d2e 100644
--- a/chromium/v8/src/objects/js-promise.tq
+++ b/chromium/v8/src/objects/js-promise.tq
@@ -16,8 +16,8 @@ extern class JSPromise extends JSObject {
}
macro SetStatus(status: constexpr PromiseState): void {
- assert(this.Status() == PromiseState::kPending);
- assert(status != PromiseState::kPending);
+ dcheck(this.Status() == PromiseState::kPending);
+ dcheck(status != PromiseState::kPending);
this.flags.status = status;
}
diff --git a/chromium/v8/src/objects/js-proxy.h b/chromium/v8/src/objects/js-proxy.h
index 575c9426512..df89b4d17a2 100644
--- a/chromium/v8/src/objects/js-proxy.h
+++ b/chromium/v8/src/objects/js-proxy.h
@@ -98,7 +98,6 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
LookupIterator* it);
// Dispatched behavior.
- DECL_PRINTER(JSProxy)
DECL_VERIFIER(JSProxy)
static const int kMaxIterationLimit = 100 * 1024;
@@ -124,12 +123,10 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
// JSProxyRevocableResult is just a JSObject with a specific initial map.
// This initial map adds in-object properties for "proxy" and "revoke".
// See https://tc39.github.io/ecma262/#sec-proxy.revocable
-class JSProxyRevocableResult : public JSObject {
+class JSProxyRevocableResult
+ : public TorqueGeneratedJSProxyRevocableResult<JSProxyRevocableResult,
+ JSObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_PROXY_REVOCABLE_RESULT_FIELDS)
-
// Indices of in-object properties.
static const int kProxyIndex = 0;
static const int kRevokeIndex = 1;
diff --git a/chromium/v8/src/objects/js-proxy.tq b/chromium/v8/src/objects/js-proxy.tq
index b91c0de5d0d..5d0f51a94f7 100644
--- a/chromium/v8/src/objects/js-proxy.tq
+++ b/chromium/v8/src/objects/js-proxy.tq
@@ -7,7 +7,6 @@ extern class JSProxy extends JSReceiver {
handler: JSReceiver|Null;
}
-@doNotGenerateCppClass
extern shape JSProxyRevocableResult extends JSObject {
proxy: JSAny;
revoke: JSAny;
diff --git a/chromium/v8/src/objects/js-regexp-inl.h b/chromium/v8/src/objects/js-regexp-inl.h
index 0f38daa5e7c..f4e38056f97 100644
--- a/chromium/v8/src/objects/js-regexp-inl.h
+++ b/chromium/v8/src/objects/js-regexp-inl.h
@@ -21,29 +21,21 @@ namespace internal {
#include "torque-generated/src/objects/js-regexp-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
-
-inline JSRegExpResultWithIndices::JSRegExpResultWithIndices(Address ptr)
- : JSRegExpResult(ptr) {
- SLOW_DCHECK(IsJSArray());
-}
-
-CAST_ACCESSOR(JSRegExpResult)
-CAST_ACCESSOR(JSRegExpResultWithIndices)
-CAST_ACCESSOR(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResult)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultWithIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
-JSRegExp::Type JSRegExp::TypeTag() const {
+JSRegExp::Type JSRegExp::type_tag() const {
Object data = this->data();
if (data.IsUndefined()) return JSRegExp::NOT_COMPILED;
Smi smi = Smi::cast(FixedArray::cast(data).get(kTagIndex));
return static_cast<JSRegExp::Type>(smi.value());
}
-int JSRegExp::CaptureCount() const {
- switch (TypeTag()) {
+int JSRegExp::capture_count() const {
+ switch (type_tag()) {
case ATOM:
return 0;
case EXPERIMENTAL:
@@ -54,61 +46,59 @@ int JSRegExp::CaptureCount() const {
}
}
-int JSRegExp::MaxRegisterCount() const {
- CHECK_EQ(TypeTag(), IRREGEXP);
+int JSRegExp::max_register_count() const {
+ CHECK_EQ(type_tag(), IRREGEXP);
return Smi::ToInt(DataAt(kIrregexpMaxRegisterCountIndex));
}
-JSRegExp::Flags JSRegExp::GetFlags() {
- DCHECK(this->data().IsFixedArray());
- Object data = this->data();
- Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
- return Flags(smi.value());
+String JSRegExp::atom_pattern() const {
+ DCHECK_EQ(type_tag(), ATOM);
+ return String::cast(DataAt(JSRegExp::kAtomPatternIndex));
}
-String JSRegExp::Pattern() {
- DCHECK(this->data().IsFixedArray());
- Object data = this->data();
- String pattern = String::cast(FixedArray::cast(data).get(kSourceIndex));
- return pattern;
+String JSRegExp::source() const {
+ return String::cast(TorqueGeneratedClass::source());
+}
+
+JSRegExp::Flags JSRegExp::flags() const {
+ Smi smi = Smi::cast(TorqueGeneratedClass::flags());
+ return Flags(smi.value());
}
String JSRegExp::EscapedPattern() {
DCHECK(this->source().IsString());
- String pattern = String::cast(source());
- return pattern;
+ return String::cast(source());
}
-Object JSRegExp::CaptureNameMap() {
- DCHECK(this->data().IsFixedArray());
- DCHECK(TypeSupportsCaptures(TypeTag()));
+Object JSRegExp::capture_name_map() {
+ DCHECK(TypeSupportsCaptures(type_tag()));
Object value = DataAt(kIrregexpCaptureNameMapIndex);
DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
return value;
}
+void JSRegExp::set_capture_name_map(Handle<FixedArray> capture_name_map) {
+ if (capture_name_map.is_null()) {
+ SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::zero());
+ } else {
+ SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, *capture_name_map);
+ }
+}
+
Object JSRegExp::DataAt(int index) const {
- DCHECK(TypeTag() != NOT_COMPILED);
+ DCHECK(type_tag() != NOT_COMPILED);
return FixedArray::cast(data()).get(index);
}
void JSRegExp::SetDataAt(int index, Object value) {
- DCHECK(TypeTag() != NOT_COMPILED);
- DCHECK_GE(index,
- kDataIndex); // Only implementation data can be set this way.
+ DCHECK(type_tag() != NOT_COMPILED);
+ // Only implementation data can be set this way.
+ DCHECK_GE(index, kFirstTypeSpecificIndex);
FixedArray::cast(data()).set(index, value);
}
-void JSRegExp::SetCaptureNameMap(Handle<FixedArray> capture_name_map) {
- if (capture_name_map.is_null()) {
- SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::zero());
- } else {
- SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, *capture_name_map);
- }
-}
-
bool JSRegExp::HasCompiledCode() const {
- if (TypeTag() != IRREGEXP) return false;
+ if (type_tag() != IRREGEXP) return false;
Smi uninitialized = Smi::FromInt(kUninitializedValue);
#ifdef DEBUG
DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCodeT() ||
diff --git a/chromium/v8/src/objects/js-regexp.cc b/chromium/v8/src/objects/js-regexp.cc
index bfc16d1b85d..ce9a9a908ce 100644
--- a/chromium/v8/src/objects/js-regexp.cc
+++ b/chromium/v8/src/objects/js-regexp.cc
@@ -6,6 +6,7 @@
#include "src/base/strings.h"
#include "src/common/globals.h"
+#include "src/objects/code.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/regexp/regexp.h"
@@ -105,70 +106,44 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
return indices;
}
-uint32_t JSRegExp::BacktrackLimit() const {
- CHECK_EQ(TypeTag(), IRREGEXP);
+uint32_t JSRegExp::backtrack_limit() const {
+ CHECK_EQ(type_tag(), IRREGEXP);
return static_cast<uint32_t>(Smi::ToInt(DataAt(kIrregexpBacktrackLimit)));
}
// static
-JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
- Handle<String> flags, bool* success) {
- int length = flags->length();
- if (length == 0) {
- *success = true;
- return JSRegExp::kNone;
- }
+base::Optional<JSRegExp::Flags> JSRegExp::FlagsFromString(
+ Isolate* isolate, Handle<String> flags) {
+ const int length = flags->length();
+
// A longer flags string cannot be valid.
- if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
- JSRegExp::Flags value(0);
- if (flags->IsSeqOneByteString()) {
- DisallowGarbageCollection no_gc;
- SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(seq_flags.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
- } else {
- flags = String::Flatten(isolate, flags);
- DisallowGarbageCollection no_gc;
- String::FlatContent flags_content = flags->GetFlatContent(no_gc);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(flags_content.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
+ if (length > JSRegExp::kFlagCount) return {};
+
+ RegExpFlags value;
+ FlatStringReader reader(isolate, String::Flatten(isolate, flags));
+
+ for (int i = 0; i < length; i++) {
+ base::Optional<RegExpFlag> flag = JSRegExp::FlagFromChar(reader.Get(i));
+ if (!flag.has_value()) return {};
+ if (value & flag.value()) return {}; // Duplicate.
+ value |= flag.value();
}
- *success = true;
- return value;
+
+ return JSRegExp::AsJSRegExpFlags(value);
}
// static
Handle<String> JSRegExp::StringFromFlags(Isolate* isolate,
JSRegExp::Flags flags) {
- // Ensure that this function is up-to-date with the supported flag options.
- constexpr size_t kFlagCount = JSRegExp::kFlagCount;
- STATIC_ASSERT(kFlagCount == 8);
-
- // Translate to the lexicographically smaller string.
+ static constexpr int kStringTerminator = 1;
int cursor = 0;
- char buffer[kFlagCount] = {'\0'};
- if (flags & JSRegExp::kHasIndices) buffer[cursor++] = 'd';
- if (flags & JSRegExp::kGlobal) buffer[cursor++] = 'g';
- if (flags & JSRegExp::kIgnoreCase) buffer[cursor++] = 'i';
- if (flags & JSRegExp::kLinear) buffer[cursor++] = 'l';
- if (flags & JSRegExp::kMultiline) buffer[cursor++] = 'm';
- if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
- if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
- if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
+ char buffer[kFlagCount + kStringTerminator];
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (flags & JSRegExp::k##Camel) buffer[cursor++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
+ buffer[cursor++] = '\0';
+ DCHECK_LE(cursor, kFlagCount + kStringTerminator);
return isolate->factory()->NewStringFromAsciiChecked(buffer);
}
@@ -182,18 +157,33 @@ MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
return JSRegExp::Initialize(regexp, pattern, flags, backtrack_limit);
}
-Object JSRegExp::Code(bool is_latin1) const {
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+Object JSRegExp::code(bool is_latin1) const {
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
Object value = DataAt(code_index(is_latin1));
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, value.IsSmi() || value.IsCodeT());
return value;
}
-Object JSRegExp::Bytecode(bool is_latin1) const {
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+void JSRegExp::set_code(bool is_latin1, Handle<Code> code) {
+ SetDataAt(code_index(is_latin1), ToCodeT(*code));
+}
+
+Object JSRegExp::bytecode(bool is_latin1) const {
+ DCHECK(type_tag() == JSRegExp::IRREGEXP ||
+ type_tag() == JSRegExp::EXPERIMENTAL);
return DataAt(bytecode_index(is_latin1));
}
+void JSRegExp::set_bytecode_and_trampoline(Isolate* isolate,
+ Handle<ByteArray> bytecode) {
+ SetDataAt(kIrregexpLatin1BytecodeIndex, *bytecode);
+ SetDataAt(kIrregexpUC16BytecodeIndex, *bytecode);
+
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
+ SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, ToCodeT(*trampoline));
+ SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, ToCodeT(*trampoline));
+}
+
bool JSRegExp::ShouldProduceBytecode() {
return FLAG_regexp_interpret_all ||
(FLAG_regexp_tier_up && !MarkedForTierUp());
@@ -201,7 +191,7 @@ bool JSRegExp::ShouldProduceBytecode() {
// Only irregexps are subject to tier-up.
bool JSRegExp::CanTierUp() {
- return FLAG_regexp_tier_up && TypeTag() == JSRegExp::IRREGEXP;
+ return FLAG_regexp_tier_up && type_tag() == JSRegExp::IRREGEXP;
}
// An irregexp is considered to be marked for tier up if the tier-up ticks
@@ -218,7 +208,7 @@ bool JSRegExp::MarkedForTierUp() {
void JSRegExp::ResetLastTierUpTick() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) + 1;
FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
Smi::FromInt(tier_up_ticks));
@@ -226,7 +216,7 @@ void JSRegExp::ResetLastTierUpTick() {
void JSRegExp::TierUpTick() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex));
if (tier_up_ticks == 0) {
return;
@@ -237,7 +227,7 @@ void JSRegExp::TierUpTick() {
void JSRegExp::MarkTierUpForNextExec() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
Smi::zero());
}
@@ -247,15 +237,15 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> source,
Handle<String> flags_string) {
Isolate* isolate = regexp->GetIsolate();
- bool success = false;
- Flags flags = JSRegExp::FlagsFromString(isolate, flags_string, &success);
- if (!success) {
+ base::Optional<Flags> flags =
+ JSRegExp::FlagsFromString(isolate, flags_string);
+ if (!flags.has_value()) {
THROW_NEW_ERROR(
isolate,
NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
JSRegExp);
}
- return Initialize(regexp, source, flags);
+ return Initialize(regexp, source, flags.value());
}
namespace {
@@ -417,7 +407,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
source = String::Flatten(isolate, source);
RETURN_ON_EXCEPTION(
- isolate, RegExp::Compile(isolate, regexp, source, flags, backtrack_limit),
+ isolate,
+ RegExp::Compile(isolate, regexp, source, JSRegExp::AsRegExpFlags(flags),
+ backtrack_limit),
JSRegExp);
Handle<String> escaped_source;
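
FlagsFromString previously reported failure through a bool* success out-parameter plus a zero Flags value; it now returns base::Optional<Flags>, and the Initialize caller above simply checks has_value() before throwing kInvalidRegExpFlags. The sketch below shows the same API shape with std::optional (base::Optional is V8's pre-C++17 equivalent); the flag table and parsing body are toys, not the real REGEXP_FLAG_LIST data.

    #include <optional>
    #include <string>

    enum Flags : int { kNone = 0, kGlobal = 1 << 0, kIgnoreCase = 1 << 1 };

    // Returns the parsed flags, or std::nullopt for an unknown or duplicate
    // flag; the caller no longer needs a separate bool* success channel.
    std::optional<Flags> FlagsFromString(const std::string& flags) {
      int value = kNone;
      for (char c : flags) {
        int flag;
        switch (c) {
          case 'g': flag = kGlobal; break;
          case 'i': flag = kIgnoreCase; break;
          default: return std::nullopt;         // unknown flag
        }
        if (value & flag) return std::nullopt;  // duplicate flag
        value |= flag;
      }
      return static_cast<Flags>(value);
    }

    int main() {
      auto flags = FlagsFromString("gi");
      if (!flags.has_value()) return 1;  // mirrors the THROW_NEW_ERROR path
      return 0;
    }
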
diff --git a/chromium/v8/src/objects/js-regexp.h b/chromium/v8/src/objects/js-regexp.h
index 029964faa27..36e6b791cde 100644
--- a/chromium/v8/src/objects/js-regexp.h
+++ b/chromium/v8/src/objects/js-regexp.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_JS_REGEXP_H_
#define V8_OBJECTS_JS_REGEXP_H_
+#include "include/v8-regexp.h"
#include "src/objects/contexts.h"
#include "src/objects/js-array.h"
+#include "src/regexp/regexp-flags.h"
#include "torque-generated/bit-fields.h"
// Has to be the last include (doesn't have include guards):
@@ -35,46 +37,14 @@ namespace internal {
// - number of capture registers (output values) of the regexp.
class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
public:
- // Meaning of Type:
- // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
- // ATOM: A simple string to match against using an indexOf operation.
- // IRREGEXP: Compiled with Irregexp.
- // EXPERIMENTAL: Compiled to use the new linear time engine.
- enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
+ enum Type {
+ NOT_COMPILED, // Initial value. No data array has been set yet.
+ ATOM, // A simple string match.
+ IRREGEXP, // Compiled with Irregexp (code or bytecode).
+ EXPERIMENTAL, // Compiled to use the experimental linear time engine.
+ };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
- static base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 8);
- // clang-format off
- return c == 'g' ? base::Optional<Flag>(kGlobal)
- : c == 'i' ? base::Optional<Flag>(kIgnoreCase)
- : c == 'm' ? base::Optional<Flag>(kMultiline)
- : c == 'y' ? base::Optional<Flag>(kSticky)
- : c == 'u' ? base::Optional<Flag>(kUnicode)
- : c == 's' ? base::Optional<Flag>(kDotAll)
- : c == 'd' ? base::Optional<Flag>(kHasIndices)
- : (FLAG_enable_experimental_regexp_engine && c == 'l')
- ? base::Optional<Flag>(kLinear)
- : base::Optional<Flag>();
- // clang-format on
- }
-
- STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
- STATIC_ASSERT(static_cast<int>(kGlobal) == v8::RegExp::kGlobal);
- STATIC_ASSERT(static_cast<int>(kIgnoreCase) == v8::RegExp::kIgnoreCase);
- STATIC_ASSERT(static_cast<int>(kMultiline) == v8::RegExp::kMultiline);
- STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
- STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
- STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
- STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
- STATIC_ASSERT(static_cast<int>(kHasIndices) == v8::RegExp::kHasIndices);
- STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
-
- DECL_ACCESSORS(last_index, Object)
-
- // If the backtrack limit is set to this marker value, no limit is applied.
- static constexpr uint32_t kNoBacktrackLimit = 0;
-
V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(
Isolate* isolate, Handle<String> source, Flags flags,
uint32_t backtrack_limit = kNoBacktrackLimit);
@@ -86,40 +56,88 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
Handle<String> source,
Handle<String> flags_string);
- static Flags FlagsFromString(Isolate* isolate, Handle<String> flags,
- bool* success);
+ DECL_ACCESSORS(last_index, Object)
+
+ // Instance fields accessors.
+ inline String source() const;
+ inline Flags flags() const;
+
+ // Data array field accessors.
+
+ inline Type type_tag() const;
+ inline String atom_pattern() const;
+ // This could be a Smi kUninitializedValue or Code.
+ V8_EXPORT_PRIVATE Object code(bool is_latin1) const;
+ V8_EXPORT_PRIVATE void set_code(bool is_unicode, Handle<Code> code);
+ // This could be a Smi kUninitializedValue or ByteArray.
+ V8_EXPORT_PRIVATE Object bytecode(bool is_latin1) const;
+ // Sets the bytecode as well as initializing trampoline slots to the
+ // RegExpInterpreterTrampoline.
+ void set_bytecode_and_trampoline(Isolate* isolate,
+ Handle<ByteArray> bytecode);
+ inline int max_register_count() const;
+ // Number of captures (without the match itself).
+ inline int capture_count() const;
+ inline Object capture_name_map();
+ inline void set_capture_name_map(Handle<FixedArray> capture_name_map);
+ uint32_t backtrack_limit() const;
+
+ static constexpr Flag AsJSRegExpFlag(RegExpFlag f) {
+ return static_cast<Flag>(f);
+ }
+ static constexpr Flags AsJSRegExpFlags(RegExpFlags f) {
+ return Flags{static_cast<int>(f)};
+ }
+ static constexpr RegExpFlags AsRegExpFlags(Flags f) {
+ return RegExpFlags{static_cast<int>(f)};
+ }
+
+ static base::Optional<RegExpFlag> FlagFromChar(char c) {
+ base::Optional<RegExpFlag> f = TryRegExpFlagFromChar(c);
+ if (!f.has_value()) return f;
+ if (f.value() == RegExpFlag::kLinear &&
+ !FLAG_enable_experimental_regexp_engine) {
+ return {};
+ }
+ return f;
+ }
+
+ STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
+#define V(_, Camel, ...) \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == v8::RegExp::k##Camel); \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == \
+ static_cast<int>(RegExpFlag::k##Camel));
+ REGEXP_FLAG_LIST(V)
+#undef V
+ STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
+ STATIC_ASSERT(kFlagCount == kRegExpFlagCount);
+
+ static base::Optional<Flags> FlagsFromString(Isolate* isolate,
+ Handle<String> flags);
V8_EXPORT_PRIVATE static Handle<String> StringFromFlags(Isolate* isolate,
Flags flags);
+ inline String EscapedPattern();
+
bool CanTierUp();
bool MarkedForTierUp();
void ResetLastTierUpTick();
void TierUpTick();
void MarkTierUpForNextExec();
- inline Type TypeTag() const;
- static bool TypeSupportsCaptures(Type t) {
+ bool ShouldProduceBytecode();
+ inline bool HasCompiledCode() const;
+ inline void DiscardCompiledCodeForSerialization();
+
+ static constexpr bool TypeSupportsCaptures(Type t) {
return t == IRREGEXP || t == EXPERIMENTAL;
}
- // Maximum number of captures allowed.
- static constexpr int kMaxCaptures = 1 << 16;
-
- // Number of captures (without the match itself).
- inline int CaptureCount() const;
// Each capture (including the match itself) needs two registers.
- static int RegistersForCaptureCount(int count) { return (count + 1) * 2; }
-
- inline int MaxRegisterCount() const;
- inline Flags GetFlags();
- inline String Pattern();
- inline String EscapedPattern();
- inline Object CaptureNameMap();
- inline Object DataAt(int index) const;
- // Set implementation data after the object has been prepared.
- inline void SetDataAt(int index, Object value);
- inline void SetCaptureNameMap(Handle<FixedArray> capture_name_map);
+ static constexpr int RegistersForCaptureCount(int count) {
+ return (count + 1) * 2;
+ }
static constexpr int code_index(bool is_latin1) {
return is_latin1 ? kIrregexpLatin1CodeIndex : kIrregexpUC16CodeIndex;
@@ -130,17 +148,6 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: kIrregexpUC16BytecodeIndex;
}
- // This could be a Smi kUninitializedValue or Code.
- V8_EXPORT_PRIVATE Object Code(bool is_latin1) const;
- // This could be a Smi kUninitializedValue or ByteArray.
- V8_EXPORT_PRIVATE Object Bytecode(bool is_latin1) const;
-
- bool ShouldProduceBytecode();
- inline bool HasCompiledCode() const;
- inline void DiscardCompiledCodeForSerialization();
-
- uint32_t BacktrackLimit() const;
-
// Dispatched behavior.
DECL_PRINTER(JSRegExp)
DECL_VERIFIER(JSRegExp)
@@ -153,59 +160,49 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kInitialLastIndexValue = 0;
// Indices in the data array.
- static const int kTagIndex = 0;
- static const int kSourceIndex = kTagIndex + 1;
- static const int kFlagsIndex = kSourceIndex + 1;
- static const int kDataIndex = kFlagsIndex + 1;
-
- // TODO(jgruber): Rename kDataIndex to something more appropriate.
- // There is no 'data' field, kDataIndex is just a marker for the
- // first non-generic index.
- static constexpr int kMinDataArrayLength = kDataIndex;
+ static constexpr int kTagIndex = 0;
+ static constexpr int kSourceIndex = kTagIndex + 1;
+ static constexpr int kFlagsIndex = kSourceIndex + 1;
+ static constexpr int kFirstTypeSpecificIndex = kFlagsIndex + 1;
+ static constexpr int kMinDataArrayLength = kFirstTypeSpecificIndex;
// The data fields are used in different ways depending on the
// value of the tag.
// Atom regexps (literal strings).
- static const int kAtomPatternIndex = kDataIndex;
-
- static const int kAtomDataSize = kAtomPatternIndex + 1;
-
- // Irregexp compiled code or trampoline to interpreter for Latin1. If
- // compilation fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpLatin1CodeIndex = kDataIndex;
- // Irregexp compiled code or trampoline to interpreter for UC16. If
- // compilation fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
- // Bytecode to interpret the regexp for Latin1. Contains kUninitializedValue
- // if we haven't compiled the regexp yet, regexp are always compiled or if
- // tier-up has happened (i.e. when kIrregexpLatin1CodeIndex contains native
- // irregexp code).
- static const int kIrregexpLatin1BytecodeIndex = kDataIndex + 2;
- // Bytecode to interpret the regexp for UC16. Contains kUninitializedValue if
- // we haven't compiled the regxp yet, regexp are always compiled or if tier-up
- // has happened (i.e. when kIrregexpUC16CodeIndex contains native irregexp
- // code).
- static const int kIrregexpUC16BytecodeIndex = kDataIndex + 3;
+ static constexpr int kAtomPatternIndex = kFirstTypeSpecificIndex;
+ static constexpr int kAtomDataSize = kAtomPatternIndex + 1;
+
+ // A Code object or a Smi marker value equal to kUninitializedValue.
+ static constexpr int kIrregexpLatin1CodeIndex = kFirstTypeSpecificIndex;
+ static constexpr int kIrregexpUC16CodeIndex = kIrregexpLatin1CodeIndex + 1;
+ // A ByteArray object or a Smi marker value equal to kUninitializedValue.
+ static constexpr int kIrregexpLatin1BytecodeIndex =
+ kIrregexpUC16CodeIndex + 1;
+ static constexpr int kIrregexpUC16BytecodeIndex =
+ kIrregexpLatin1BytecodeIndex + 1;
// Maximal number of registers used by either Latin1 or UC16.
// Only used to check that there is enough stack space
- static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
+ static constexpr int kIrregexpMaxRegisterCountIndex =
+ kIrregexpUC16BytecodeIndex + 1;
// Number of captures in the compiled regexp.
- static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
+ static constexpr int kIrregexpCaptureCountIndex =
+ kIrregexpMaxRegisterCountIndex + 1;
// Maps names of named capture groups (at indices 2i) to their corresponding
// (1-based) capture group indices (at indices 2i + 1).
- static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
+ static constexpr int kIrregexpCaptureNameMapIndex =
+ kIrregexpCaptureCountIndex + 1;
// Tier-up ticks are set to the value of the tier-up ticks flag. The value is
// decremented on each execution of the bytecode, so that the tier-up
// happens once the ticks reach zero.
// This value is ignored if the regexp-tier-up flag isn't turned on.
- static const int kIrregexpTicksUntilTierUpIndex = kDataIndex + 7;
+ static constexpr int kIrregexpTicksUntilTierUpIndex =
+ kIrregexpCaptureNameMapIndex + 1;
// A smi containing either the backtracking limit or kNoBacktrackLimit.
// TODO(jgruber): If needed, this limit could be packed into other fields
// above to save space.
- static const int kIrregexpBacktrackLimit = kDataIndex + 8;
- static const int kIrregexpDataSize = kDataIndex + 9;
+ static constexpr int kIrregexpBacktrackLimit =
+ kIrregexpTicksUntilTierUpIndex + 1;
+ static constexpr int kIrregexpDataSize = kIrregexpBacktrackLimit + 1;
// TODO(mbid,v8:10765): At the moment the EXPERIMENTAL data array conforms
// to the format of an IRREGEXP data array, with most fields set to some
@@ -217,27 +214,39 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kExperimentalDataSize = kIrregexpDataSize;
// In-object fields.
- static const int kLastIndexFieldIndex = 0;
- static const int kInObjectFieldCount = 1;
+ static constexpr int kLastIndexFieldIndex = 0;
+ static constexpr int kInObjectFieldCount = 1;
// The actual object size including in-object fields.
- static int Size() { return kHeaderSize + kInObjectFieldCount * kTaggedSize; }
+ static constexpr int Size() {
+ return kHeaderSize + kInObjectFieldCount * kTaggedSize;
+ }
// Descriptor array index to important methods in the prototype.
- static const int kExecFunctionDescriptorIndex = 1;
- static const int kSymbolMatchFunctionDescriptorIndex = 14;
- static const int kSymbolMatchAllFunctionDescriptorIndex = 15;
- static const int kSymbolReplaceFunctionDescriptorIndex = 16;
- static const int kSymbolSearchFunctionDescriptorIndex = 17;
- static const int kSymbolSplitFunctionDescriptorIndex = 18;
+ static constexpr int kExecFunctionDescriptorIndex = 1;
+ static constexpr int kSymbolMatchFunctionDescriptorIndex = 14;
+ static constexpr int kSymbolMatchAllFunctionDescriptorIndex = 15;
+ static constexpr int kSymbolReplaceFunctionDescriptorIndex = 16;
+ static constexpr int kSymbolSearchFunctionDescriptorIndex = 17;
+ static constexpr int kSymbolSplitFunctionDescriptorIndex = 18;
// The uninitialized value for a regexp code object.
- static const int kUninitializedValue = -1;
+ static constexpr int kUninitializedValue = -1;
+
+ // If the backtrack limit is set to this marker value, no limit is applied.
+ static constexpr uint32_t kNoBacktrackLimit = 0;
// The heuristic value for the length of the subject string for which we
// tier-up to the compiler immediately, instead of using the interpreter.
static constexpr int kTierUpForSubjectLengthValue = 1000;
+ // Maximum number of captures allowed.
+ static constexpr int kMaxCaptures = 1 << 16;
+
+ private:
+ inline Object DataAt(int index) const;
+ inline void SetDataAt(int index, Object value);
+
TQ_OBJECT_CONSTRUCTORS(JSRegExp)
};
@@ -249,50 +258,40 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult : public JSArray {
+class JSRegExpResult
+ : public TorqueGeneratedJSRegExpResult<JSRegExpResult, JSArray> {
public:
- DECL_CAST(JSRegExpResult)
-
// TODO(joshualitt): We would like to add printers and verifiers to
// JSRegExpResult, and maybe JSRegExpResultIndices, but both have the same
// instance type as JSArray.
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kHeaderSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS)
-
// Indices of in-object properties.
- static const int kIndexIndex = 0;
- static const int kInputIndex = 1;
- static const int kGroupsIndex = 2;
+ static constexpr int kIndexIndex = 0;
+ static constexpr int kInputIndex = 1;
+ static constexpr int kGroupsIndex = 2;
// Private internal only fields.
- static const int kNamesIndex = 3;
- static const int kRegExpInputIndex = 4;
- static const int kRegExpLastIndex = 5;
- static const int kInObjectPropertyCount = 6;
+ static constexpr int kNamesIndex = 3;
+ static constexpr int kRegExpInputIndex = 4;
+ static constexpr int kRegExpLastIndex = 5;
+ static constexpr int kInObjectPropertyCount = 6;
- static const int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
+ static constexpr int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
- OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResult)
};
-class JSRegExpResultWithIndices : public JSRegExpResult {
+class JSRegExpResultWithIndices
+ : public TorqueGeneratedJSRegExpResultWithIndices<JSRegExpResultWithIndices,
+ JSRegExpResult> {
public:
- DECL_CAST(JSRegExpResultWithIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSRegExpResult::kSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_WITH_INDICES_FIELDS)
-
static_assert(
JSRegExpResult::kInObjectPropertyCount == 6,
"JSRegExpResultWithIndices must be a subclass of JSRegExpResult");
- static const int kIndicesIndex = 6;
- static const int kInObjectPropertyCount = 7;
+ static constexpr int kIndicesIndex = 6;
+ static constexpr int kInObjectPropertyCount = 7;
- OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices, JSRegExpResult);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices)
};
// JSRegExpResultIndices is just a JSArray with a specific initial map.
@@ -301,26 +300,22 @@ class JSRegExpResultWithIndices : public JSRegExpResult {
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResultIndices : public JSArray {
+class JSRegExpResultIndices
+ : public TorqueGeneratedJSRegExpResultIndices<JSRegExpResultIndices,
+ JSArray> {
public:
- DECL_CAST(JSRegExpResultIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArray::kHeaderSize, TORQUE_GENERATED_JS_REG_EXP_RESULT_INDICES_FIELDS)
-
static Handle<JSRegExpResultIndices> BuildIndices(
Isolate* isolate, Handle<RegExpMatchInfo> match_info,
Handle<Object> maybe_names);
// Indices of in-object properties.
- static const int kGroupsIndex = 0;
- static const int kInObjectPropertyCount = 1;
+ static constexpr int kGroupsIndex = 0;
+ static constexpr int kInObjectPropertyCount = 1;
// Descriptor index of groups.
- static const int kGroupsDescriptorIndex = 1;
+ static constexpr int kGroupsDescriptorIndex = 1;
- OBJECT_CONSTRUCTORS(JSRegExpResultIndices, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultIndices)
};
} // namespace internal
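
Both StringFromFlags in js-regexp.cc and the STATIC_ASSERT block above now expand a REGEXP_FLAG_LIST(V) X-macro instead of repeating one line per flag, so adding a flag to the central list updates the string formatting, the assertions and the bit layout together. Below is a minimal, self-contained illustration of the X-macro technique; the demo list is a stand-in, and judging by the new src/regexp/regexp-flags.h include the real REGEXP_FLAG_LIST carries more columns (Lower, Camel, LowerCamel, Char, Bit).

    #include <cstdio>

    // One central list; each entry is V(Camel, Char, Bit).
    #define DEMO_FLAG_LIST(V) \
      V(Global, 'g', 0)       \
      V(IgnoreCase, 'i', 1)   \
      V(Multiline, 'm', 2)

    // Expansion 1: the enum of flag bits.
    enum Flag {
    #define V(Camel, Char, Bit) k##Camel = 1 << Bit,
      DEMO_FLAG_LIST(V)
    #undef V
    };

    // Expansion 2: flags -> string, the same shape as the new StringFromFlags.
    void FlagsToString(int flags, char* buffer) {
      int cursor = 0;
    #define V(Camel, Char, Bit) \
      if (flags & k##Camel) buffer[cursor++] = Char;
      DEMO_FLAG_LIST(V)
    #undef V
      buffer[cursor] = '\0';
    }

    int main() {
      char buffer[8];
      FlagsToString(kGlobal | kMultiline, buffer);
      std::printf("%s\n", buffer);  // prints "gm"
      return 0;
    }
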
diff --git a/chromium/v8/src/objects/js-regexp.tq b/chromium/v8/src/objects/js-regexp.tq
index 328dd94efbe..7c60df214af 100644
--- a/chromium/v8/src/objects/js-regexp.tq
+++ b/chromium/v8/src/objects/js-regexp.tq
@@ -38,7 +38,6 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
extern class JSRegExpConstructor extends JSFunction
generates 'TNode<JSFunction>';
-@doNotGenerateCppClass
extern shape JSRegExpResult extends JSArray {
// In-object properties:
// The below fields are externally exposed.
@@ -52,12 +51,10 @@ extern shape JSRegExpResult extends JSArray {
regexp_last_index: Smi;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultWithIndices extends JSRegExpResult {
indices: JSAny;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultIndices extends JSArray {
// In-object properties:
// The groups field is externally exposed.
diff --git a/chromium/v8/src/objects/js-relative-time-format.cc b/chromium/v8/src/objects/js-relative-time-format.cc
index caa4ce562d7..d6a65d95cab 100644
--- a/chromium/v8/src/objects/js-relative-time-format.cc
+++ b/chromium/v8/src/objects/js-relative-time-format.cc
@@ -17,7 +17,9 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/decimfmt.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
@@ -78,8 +80,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
Handle<JSReceiver> options;
const char* service = "Intl.RelativeTimeFormat";
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, input_options, service),
+ isolate, options, CoerceOptionsToObject(isolate, input_options, service),
JSRelativeTimeFormat);
// 4. Let opt be a new Record.
@@ -147,7 +148,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 16. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
@@ -157,7 +158,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 18. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
- Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
+ Maybe<Numeric> maybe_numeric = GetStringOption<Numeric>(
isolate, options, "numeric", service, {"always", "auto"},
{Numeric::ALWAYS, Numeric::AUTO}, Numeric::ALWAYS);
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
@@ -342,9 +343,9 @@ template <typename T>
MaybeHandle<T> FormatCommon(
Isolate* isolate, Handle<JSRelativeTimeFormat> format,
Handle<Object> value_obj, Handle<Object> unit_obj, const char* func_name,
- MaybeHandle<T> (*formatToResult)(Isolate*,
- const icu::FormattedRelativeDateTime&,
- Handle<Object>, Handle<String>)) {
+ const std::function<
+ MaybeHandle<T>(Isolate*, const icu::FormattedRelativeDateTime&,
+ Handle<Object>, Handle<String>)>& formatToResult) {
// 3. Let value be ? ToNumber(value).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
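
In js-relative-time-format.cc above, FormatCommon's formatToResult parameter changes from a plain function pointer to a const std::function reference, which accepts any callable with that signature, including capturing lambdas, at the cost of a possible allocation and an extra indirection. A small self-contained contrast of the two parameter styles; the int/std::string signature is a placeholder for the real Isolate / icu::FormattedRelativeDateTime / Handle parameters.

    #include <functional>
    #include <string>

    // Function-pointer parameter: only captureless callables convert to this.
    std::string FormatWithPtr(int value, std::string (*format)(int)) {
      return format(value);
    }

    // std::function parameter: also accepts lambdas that capture state.
    std::string FormatWithFn(int value,
                             const std::function<std::string(int)>& format) {
      return format(value);
    }

    int main() {
      std::string unit = "seconds";
      // Fine for both overload styles (no captures):
      FormatWithPtr(3, [](int v) { return std::to_string(v); });
      // Only compiles with the std::function version, because of the capture:
      FormatWithFn(3, [&unit](int v) { return std::to_string(v) + " " + unit; });
      return 0;
    }
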
diff --git a/chromium/v8/src/objects/js-segment-iterator.cc b/chromium/v8/src/objects/js-segment-iterator.cc
index ff10303dbb9..4fa3f173cc7 100644
--- a/chromium/v8/src/objects/js-segment-iterator.cc
+++ b/chromium/v8/src/objects/js-segment-iterator.cc
@@ -17,7 +17,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segments.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
diff --git a/chromium/v8/src/objects/js-segmenter.cc b/chromium/v8/src/objects/js-segmenter.cc
index 386150613ae..be04f140526 100644
--- a/chromium/v8/src/objects/js-segmenter.cc
+++ b/chromium/v8/src/objects/js-segmenter.cc
@@ -16,8 +16,9 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -36,9 +37,9 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.Segmenter";
// 5. Let options be GetOptionsObject(_options_).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSSegmenter);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSSegmenter);
// 7. Let opt be a new Record.
// 8. Let matcher be ? GetOption(options, "localeMatcher", "string",
@@ -68,7 +69,7 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
// "grapheme", "word", "sentence" », "grapheme").
- Maybe<Granularity> maybe_granularity = Intl::GetStringOption<Granularity>(
+ Maybe<Granularity> maybe_granularity = GetStringOption<Granularity>(
isolate, options, "granularity", service,
{"grapheme", "word", "sentence"},
{Granularity::GRAPHEME, Granularity::WORD, Granularity::SENTENCE},
diff --git a/chromium/v8/src/objects/js-segments.cc b/chromium/v8/src/objects/js-segments.cc
index ec3f8f6a2c7..84d8197e57d 100644
--- a/chromium/v8/src/objects/js-segments.cc
+++ b/chromium/v8/src/objects/js-segments.cc
@@ -18,7 +18,7 @@
#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/js-segments-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
diff --git a/chromium/v8/src/objects/js-weak-refs-inl.h b/chromium/v8/src/objects/js-weak-refs-inl.h
index 13ac175cf6e..acce7b72b94 100644
--- a/chromium/v8/src/objects/js-weak-refs-inl.h
+++ b/chromium/v8/src/objects/js-weak-refs-inl.h
@@ -21,18 +21,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
-OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
-
-ACCESSORS(JSFinalizationRegistry, native_context, NativeContext,
- kNativeContextOffset)
-ACCESSORS(JSFinalizationRegistry, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSFinalizationRegistry, active_cells, HeapObject, kActiveCellsOffset)
-ACCESSORS(JSFinalizationRegistry, cleared_cells, HeapObject,
- kClearedCellsOffset)
-ACCESSORS(JSFinalizationRegistry, key_map, Object, kKeyMapOffset)
-SMI_ACCESSORS(JSFinalizationRegistry, flags, kFlagsOffset)
-ACCESSORS(JSFinalizationRegistry, next_dirty, Object, kNextDirtyOffset)
-CAST_ACCESSOR(JSFinalizationRegistry)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry)
BIT_FIELD_ACCESSORS(JSFinalizationRegistry, flags, scheduled_for_cleanup,
JSFinalizationRegistry::ScheduledForCleanupBit)
diff --git a/chromium/v8/src/objects/js-weak-refs.h b/chromium/v8/src/objects/js-weak-refs.h
index 250186e7bef..57f765b282e 100644
--- a/chromium/v8/src/objects/js-weak-refs.h
+++ b/chromium/v8/src/objects/js-weak-refs.h
@@ -21,22 +21,12 @@ class WeakCell;
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
-class JSFinalizationRegistry : public JSObject {
+class JSFinalizationRegistry
+ : public TorqueGeneratedJSFinalizationRegistry<JSFinalizationRegistry,
+ JSObject> {
public:
DECL_PRINTER(JSFinalizationRegistry)
EXPORT_DECL_VERIFIER(JSFinalizationRegistry)
- DECL_CAST(JSFinalizationRegistry)
-
- DECL_ACCESSORS(native_context, NativeContext)
- DECL_ACCESSORS(cleanup, Object)
-
- DECL_ACCESSORS(active_cells, HeapObject)
- DECL_ACCESSORS(cleared_cells, HeapObject)
- DECL_ACCESSORS(key_map, Object)
-
- DECL_ACCESSORS(next_dirty, Object)
-
- DECL_INT_ACCESSORS(flags)
DECL_BOOLEAN_ACCESSORS(scheduled_for_cleanup)
@@ -72,20 +62,15 @@ class JSFinalizationRegistry : public JSObject {
Isolate* isolate, Address raw_finalization_registry,
Address raw_weak_cell);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
-
// Bitfields in flags.
DEFINE_TORQUE_GENERATED_FINALIZATION_REGISTRY_FLAGS()
- OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSFinalizationRegistry)
};
// Internal object for storing weak references in JSFinalizationRegistry.
class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
public:
- DECL_PRINTER(WeakCell)
EXPORT_DECL_VERIFIER(WeakCell)
class BodyDescriptor;
diff --git a/chromium/v8/src/objects/js-weak-refs.tq b/chromium/v8/src/objects/js-weak-refs.tq
index 36f3817ac78..c687ab50016 100644
--- a/chromium/v8/src/objects/js-weak-refs.tq
+++ b/chromium/v8/src/objects/js-weak-refs.tq
@@ -6,7 +6,6 @@ bitfield struct FinalizationRegistryFlags extends uint31 {
scheduled_for_cleanup: bool: 1 bit;
}
-@doNotGenerateCppClass
extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
cleanup: Callable;
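The hunks above switch JSFinalizationRegistry from hand-written accessor macros to a Torque-generated base class, so the header keeps only behavior and re-exports the generated constructors. A minimal standalone sketch of that CRTP shape follows; it is simplified, uses invented names, and is not the real V8 macros or generated code.

#include <iostream>

// A "generated" base supplies the field accessors; in V8 this would come from
// the Torque compiler once @doNotGenerateCppClass is dropped.
template <class Derived>
class TorqueGeneratedFinalizationRegistry {
 public:
  int flags() const { return flags_; }
  void set_flags(int value) { flags_ = value; }

 private:
  int flags_ = 0;
};

// The hand-written class keeps only behavior; the boilerplate that the removed
// ACCESSORS/SMI_ACCESSORS macros used to provide is gone.
class FinalizationRegistry
    : public TorqueGeneratedFinalizationRegistry<FinalizationRegistry> {
 public:
  static constexpr int kScheduledForCleanupBit = 1 << 0;

  bool scheduled_for_cleanup() const {
    return (flags() & kScheduledForCleanupBit) != 0;
  }
  void set_scheduled_for_cleanup(bool value) {
    set_flags(value ? (flags() | kScheduledForCleanupBit)
                    : (flags() & ~kScheduledForCleanupBit));
  }
};

int main() {
  FinalizationRegistry registry;
  registry.set_scheduled_for_cleanup(true);
  std::cout << registry.scheduled_for_cleanup() << "\n";  // prints 1
}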
diff --git a/chromium/v8/src/objects/keys.cc b/chromium/v8/src/objects/keys.cc
index 815d9ac5043..acd94fcf868 100644
--- a/chromium/v8/src/objects/keys.cc
+++ b/chromium/v8/src/objects/keys.cc
@@ -405,7 +405,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Object key = descriptors->GetKey(i);
if (key.IsSymbol()) continue;
keys->set(index, key);
- if (details.location() != kField) fields_only = false;
+ if (details.location() != PropertyLocation::kField) fields_only = false;
index++;
}
DCHECK_EQ(index, keys->length());
@@ -422,7 +422,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Object key = descriptors->GetKey(i);
if (key.IsSymbol()) continue;
DCHECK_EQ(kData, details.kind());
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
index++;
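Most of the mechanical churn in the surrounding hunks comes from PropertyLocation becoming a scoped enum. A minimal sketch of what that change means for call sites (illustrative names only, not the V8 definitions):

// Unscoped enums inject their enumerators into the surrounding namespace, so
// old call sites could write bare kField. The scoped replacement keeps the
// names inside the type and removes the implicit conversion to int.
enum class PropertyLocation { kField, kDescriptor };

bool IsStoredInField(PropertyLocation location) {
  // Qualification is now mandatory, hence the repeated edits in these hunks.
  return location == PropertyLocation::kField;
}

int main() { return IsStoredInField(PropertyLocation::kField) ? 0 : 1; }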
diff --git a/chromium/v8/src/objects/keys.h b/chromium/v8/src/objects/keys.h
index 4abe2a5ad30..b1f539e2334 100644
--- a/chromium/v8/src/objects/keys.h
+++ b/chromium/v8/src/objects/keys.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_KEYS_H_
#define V8_OBJECTS_KEYS_H_
+#include "include/v8-object.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
@@ -17,6 +18,18 @@ class FastKeyAccumulator;
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
+ kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
+};
+
+enum class KeyCollectionMode {
+ kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+ kIncludePrototypes =
+ static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
+
// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
// GetKeys needs to sort keys per prototype level, first showing the integer
// indices from elements then the strings from the properties. However, this
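GetKeysConversion and KeyCollectionMode move here as internal mirrors of the public v8:: enums, defined by static_cast so the numeric values stay identical. A small standalone sketch of that pattern, using stand-in namespaces rather than the real v8/internal ones:

namespace api {
// Stand-in for the public v8::KeyCollectionMode.
enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
}  // namespace api

namespace internal {
// Internal mirror whose enumerators are defined from the public values, so a
// static_cast in either direction is always value-preserving.
enum class KeyCollectionMode {
  kOwnOnly = static_cast<int>(api::KeyCollectionMode::kOwnOnly),
  kIncludePrototypes =
      static_cast<int>(api::KeyCollectionMode::kIncludePrototypes)
};

static_assert(static_cast<int>(KeyCollectionMode::kIncludePrototypes) ==
                  static_cast<int>(api::KeyCollectionMode::kIncludePrototypes),
              "public and internal enums must stay in sync");

inline KeyCollectionMode FromApi(api::KeyCollectionMode mode) {
  return static_cast<KeyCollectionMode>(mode);
}
}  // namespace internal

int main() {
  return internal::FromApi(api::KeyCollectionMode::kIncludePrototypes) ==
                 internal::KeyCollectionMode::kIncludePrototypes
             ? 0
             : 1;
}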
diff --git a/chromium/v8/src/objects/literal-objects.h b/chromium/v8/src/objects/literal-objects.h
index a20347c4a7e..242b3a6469a 100644
--- a/chromium/v8/src/objects/literal-objects.h
+++ b/chromium/v8/src/objects/literal-objects.h
@@ -80,7 +80,6 @@ class RegExpBoilerplateDescription
RegExpBoilerplateDescription, Struct> {
public:
// Dispatched behavior.
- DECL_PRINTER(RegExpBoilerplateDescription)
void BriefPrintDetails(std::ostream& os);
private:
diff --git a/chromium/v8/src/objects/lookup.cc b/chromium/v8/src/objects/lookup.cc
index 283e4f84d53..b53bbeb0e7d 100644
--- a/chromium/v8/src/objects/lookup.cc
+++ b/chromium/v8/src/objects/lookup.cc
@@ -889,7 +889,7 @@ Handle<Object> LookupIterator::FetchValue(
result = holder_->property_dictionary(isolate_).ValueAt(
isolate_, dictionary_entry());
}
- } else if (property_details_.location() == kField) {
+ } else if (property_details_.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, property_details_.kind());
#if V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(holder_->IsWasmObject(isolate_))) {
@@ -932,7 +932,7 @@ Handle<Object> LookupIterator::FetchValue(
bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK(!IsElement(*holder_));
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
if (value.IsUninitialized(isolate())) {
// Storing uninitialized value means that we are preparing for a computed
@@ -1004,7 +1004,7 @@ bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK_EQ(kData, property_details_.kind());
// TODO(jkummerow): Propagate InternalIndex further.
return descriptor_number().as_int();
@@ -1013,7 +1013,7 @@ int LookupIterator::GetFieldDescriptorIndex() const {
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, property_details_.location());
DCHECK_EQ(kAccessor, property_details_.kind());
return descriptor_number().as_int();
}
@@ -1021,7 +1021,7 @@ int LookupIterator::GetAccessorIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK(!IsElement(*holder_));
return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
}
@@ -1062,7 +1062,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
accessor->Set(object, number_, *value);
} else if (holder->HasFastProperties(isolate_)) {
DCHECK(holder->IsJSObject(isolate_));
- if (property_details_.location() == kField) {
+ if (property_details_.location() == PropertyLocation::kField) {
// Check that in case of VariableMode::kConst field the existing value is
// equal to |value|.
DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
@@ -1071,7 +1071,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
JSObject::cast(*holder).WriteToField(descriptor_number(),
property_details_, *value);
} else {
- DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject(isolate_)) {
@@ -1507,7 +1507,8 @@ ConcurrentLookupIterator::Result ConcurrentLookupIterator::TryGetOwnChar(
uint16_t charcode;
{
SharedStringAccessGuardIfNeeded access_guard(local_isolate);
- charcode = string.Get(static_cast<int>(index));
+ charcode = string.Get(static_cast<int>(index), PtrComprCageBase(isolate),
+ access_guard);
}
if (charcode > unibrow::Latin1::kMaxChar) return kGaveUp;
@@ -1527,8 +1528,8 @@ base::Optional<PropertyCell> ConcurrentLookupIterator::TryGetPropertyCell(
DisallowGarbageCollection no_gc;
Map holder_map = holder->map();
- CHECK(!holder_map.is_access_check_needed());
- CHECK(!holder_map.has_named_interceptor());
+ if (holder_map.is_access_check_needed()) return {};
+ if (holder_map.has_named_interceptor()) return {};
GlobalDictionary dict = holder->global_dictionary(kAcquireLoad);
base::Optional<PropertyCell> cell =
diff --git a/chromium/v8/src/objects/managed-inl.h b/chromium/v8/src/objects/managed-inl.h
new file mode 100644
index 00000000000..a8a54e18c0a
--- /dev/null
+++ b/chromium/v8/src/objects/managed-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MANAGED_INL_H_
+#define V8_OBJECTS_MANAGED_INL_H_
+
+#include "src/handles/global-handles-inl.h"
+#include "src/objects/managed.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+template <class CppType>
+template <typename... Args>
+Handle<Managed<CppType>> Managed<CppType>::Allocate(Isolate* isolate,
+ size_t estimated_size,
+ Args&&... args) {
+ return FromSharedPtr(isolate, estimated_size,
+ std::make_shared<CppType>(std::forward<Args>(args)...));
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromRawPtr(Isolate* isolate,
+ size_t estimated_size,
+ CppType* ptr) {
+ return FromSharedPtr(isolate, estimated_size, std::shared_ptr<CppType>{ptr});
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromUniquePtr(
+ Isolate* isolate, size_t estimated_size,
+ std::unique_ptr<CppType> unique_ptr) {
+ return FromSharedPtr(isolate, estimated_size, std::move(unique_ptr));
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromSharedPtr(
+ Isolate* isolate, size_t estimated_size,
+ std::shared_ptr<CppType> shared_ptr) {
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(estimated_size);
+ auto destructor = new ManagedPtrDestructor(
+ estimated_size, new std::shared_ptr<CppType>{std::move(shared_ptr)},
+ Destructor);
+ Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
+ Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+ destructor->global_handle_location_ = global_handle.location();
+ GlobalHandles::MakeWeak(destructor->global_handle_location_, destructor,
+ &ManagedObjectFinalizer,
+ v8::WeakCallbackType::kParameter);
+ isolate->RegisterManagedPtrDestructor(destructor);
+ return handle;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_MANAGED_INL_H_
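The new managed-inl.h keeps the wrapped C++ object alive through a heap-allocated std::shared_ptr owned by a destructor record that the GC's weak callback eventually frees. A minimal standalone sketch of that ownership pattern, with plain functions standing in for the Foreign object, the global handle, and the weak callback (none of the names below are the V8 types):

#include <cstddef>
#include <cstdio>
#include <memory>
#include <utility>

// The record that outlives the handle: it owns a heap-allocated shared_ptr
// plus a type-erased deleter, much like ManagedPtrDestructor above.
struct ManagedDestructorRecord {
  size_t estimated_size;
  void* shared_ptr_location;  // points at a new std::shared_ptr<T>
  void (*destroy)(void* shared_ptr_location);
};

template <class CppType>
ManagedDestructorRecord* MakeManaged(size_t estimated_size,
                                     std::shared_ptr<CppType> shared_ptr) {
  return new ManagedDestructorRecord{
      estimated_size,
      new std::shared_ptr<CppType>(std::move(shared_ptr)),
      [](void* location) {
        delete static_cast<std::shared_ptr<CppType>*>(location);
      }};
}

// Stand-in for the weak callback the GC invokes once the wrapper object dies.
void Finalize(ManagedDestructorRecord* record) {
  record->destroy(record->shared_ptr_location);
  delete record;
}

struct Resource {
  ~Resource() { std::puts("Resource destroyed"); }
};

int main() {
  ManagedDestructorRecord* record =
      MakeManaged(sizeof(Resource), std::make_shared<Resource>());
  std::puts("wrapper alive");
  Finalize(record);  // prints "Resource destroyed"
}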
diff --git a/chromium/v8/src/objects/managed.cc b/chromium/v8/src/objects/managed.cc
index 8376ccb547a..8853fb95d2d 100644
--- a/chromium/v8/src/objects/managed.cc
+++ b/chromium/v8/src/objects/managed.cc
@@ -4,6 +4,8 @@
#include "src/objects/managed.h"
+#include "src/handles/global-handles-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/objects/managed.h b/chromium/v8/src/objects/managed.h
index 8d56a13aef2..b681230ba23 100644
--- a/chromium/v8/src/objects/managed.h
+++ b/chromium/v8/src/objects/managed.h
@@ -7,7 +7,6 @@
#include <memory>
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/objects/foreign.h"
@@ -65,49 +64,25 @@ class Managed : public Foreign {
template <typename... Args>
static Handle<Managed<CppType>> Allocate(Isolate* isolate,
size_t estimated_size,
- Args&&... args) {
- return FromSharedPtr(
- isolate, estimated_size,
- std::make_shared<CppType>(std::forward<Args>(args)...));
- }
+ Args&&... args);
// Create a {Managed<CppType>} from an existing raw {CppType*}. The returned
// object will now own the memory pointed to by {CppType}.
static Handle<Managed<CppType>> FromRawPtr(Isolate* isolate,
size_t estimated_size,
- CppType* ptr) {
- return FromSharedPtr(isolate, estimated_size,
- std::shared_ptr<CppType>{ptr});
- }
+ CppType* ptr);
// Create a {Managed<CppType>} from an existing {std::unique_ptr<CppType>}.
// The returned object will now own the memory pointed to by {CppType}, and
// the unique pointer will be released.
static Handle<Managed<CppType>> FromUniquePtr(
Isolate* isolate, size_t estimated_size,
- std::unique_ptr<CppType> unique_ptr) {
- return FromSharedPtr(isolate, estimated_size, std::move(unique_ptr));
- }
+ std::unique_ptr<CppType> unique_ptr);
// Create a {Managed<CppType>} from an existing {std::shared_ptr<CppType>}.
static Handle<Managed<CppType>> FromSharedPtr(
Isolate* isolate, size_t estimated_size,
- std::shared_ptr<CppType> shared_ptr) {
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(estimated_size);
- auto destructor = new ManagedPtrDestructor(
- estimated_size, new std::shared_ptr<CppType>{std::move(shared_ptr)},
- Destructor);
- Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
- isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
- Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
- destructor->global_handle_location_ = global_handle.location();
- GlobalHandles::MakeWeak(destructor->global_handle_location_, destructor,
- &ManagedObjectFinalizer,
- v8::WeakCallbackType::kParameter);
- isolate->RegisterManagedPtrDestructor(destructor);
- return handle;
- }
+ std::shared_ptr<CppType> shared_ptr);
private:
// Internally this {Foreign} object stores a pointer to a new
diff --git a/chromium/v8/src/objects/map-inl.h b/chromium/v8/src/objects/map-inl.h
index 572b3f9299c..c8eb4004241 100644
--- a/chromium/v8/src/objects/map-inl.h
+++ b/chromium/v8/src/objects/map-inl.h
@@ -466,6 +466,28 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
+#if V8_ENABLE_WEBASSEMBLY
+uint8_t Map::WasmByte1() const {
+ DCHECK(IsWasmObjectMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+uint8_t Map::WasmByte2() const {
+ DCHECK(IsWasmObjectMap());
+ return used_or_unused_instance_size_in_words();
+}
+
+void Map::SetWasmByte1(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+void Map::SetWasmByte2(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_used_or_unused_instance_size_in_words(value);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
byte Map::bit_field() const {
// TODO(solanes, v8:7790, v8:11353): Make this non-atomic when TSAN sees the
// map's store synchronization.
@@ -632,7 +654,8 @@ bool Map::CanBeDeprecated() const {
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.representation().MightCauseMapDeprecation()) return true;
- if (details.kind() == kData && details.location() == kDescriptor) {
+ if (details.kind() == kData &&
+ details.location() == PropertyLocation::kDescriptor) {
return true;
}
}
@@ -707,7 +730,7 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
set_may_have_interesting_symbols(true);
}
PropertyDetails details = desc->GetDetails();
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_GT(UnusedPropertyFields(), 0);
AccountAddedPropertyField();
}
@@ -715,7 +738,8 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
// This function does not support appending double field descriptors and
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
- DCHECK(details.location() != kField || !details.representation().IsDouble());
+ DCHECK(details.location() != PropertyLocation::kField ||
+ !details.representation().IsDouble());
#endif
}
@@ -726,7 +750,7 @@ bool Map::ConcurrentIsMap(PtrComprCageBase cage_base,
}
DEF_GETTER(Map, GetBackPointer, HeapObject) {
- Object object = constructor_or_back_pointer(cage_base);
+ Object object = constructor_or_back_pointer(cage_base, kRelaxedLoad);
if (ConcurrentIsMap(cage_base, object)) {
return Map::cast(object);
}
@@ -754,6 +778,9 @@ ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
kConstructorOrBackPointerOrNativeContextOffset,
!IsContextMap(), value.IsNull() || !IsContextMap())
+RELAXED_ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
+ kConstructorOrBackPointerOrNativeContextOffset,
+ !IsContextMap(), value.IsNull() || !IsContextMap())
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
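GetBackPointer now reads constructor_or_back_pointer through a relaxed accessor because a background thread may read the field while the main thread writes it. A rough standalone sketch of what a relaxed accessor amounts to, using std::atomic directly instead of the V8 macros:

#include <atomic>
#include <cstdint>

// A field read by a background thread while the main thread may write it needs
// at least relaxed atomic accesses; "relaxed" gives atomicity without ordering,
// which is enough for a single word read opportunistically.
class MapLike {
 public:
  uintptr_t constructor_or_back_pointer_relaxed() const {
    return constructor_or_back_pointer_.load(std::memory_order_relaxed);
  }
  void set_constructor_or_back_pointer(uintptr_t value) {
    constructor_or_back_pointer_.store(value, std::memory_order_relaxed);
  }

 private:
  std::atomic<uintptr_t> constructor_or_back_pointer_{0};
};

int main() {
  MapLike map;
  map.set_constructor_or_back_pointer(0x4242);
  return map.constructor_or_back_pointer_relaxed() == 0x4242 ? 0 : 1;
}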
diff --git a/chromium/v8/src/objects/map-updater.cc b/chromium/v8/src/objects/map-updater.cc
index ba7961a9ca2..3bfd3922a3d 100644
--- a/chromium/v8/src/objects/map-updater.cc
+++ b/chromium/v8/src/objects/map-updater.cc
@@ -130,20 +130,20 @@ PropertyDetails MapUpdater::GetDetails(InternalIndex descriptor) const {
Object MapUpdater::GetValue(InternalIndex descriptor) const {
DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
- DCHECK_EQ(kDescriptor, new_location_);
+ DCHECK_EQ(PropertyLocation::kDescriptor, new_location_);
return *new_value_;
}
- DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, GetDetails(descriptor).location());
return old_descriptors_->GetStrongValue(descriptor);
}
FieldType MapUpdater::GetFieldType(InternalIndex descriptor) const {
DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
- DCHECK_EQ(kField, new_location_);
+ DCHECK_EQ(PropertyLocation::kField, new_location_);
return *new_field_type_;
}
- DCHECK_EQ(kField, GetDetails(descriptor).location());
+ DCHECK_EQ(PropertyLocation::kField, GetDetails(descriptor).location());
return old_descriptors_->GetFieldType(descriptor);
}
@@ -153,7 +153,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
DCHECK(descriptor.is_found());
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(location, GetDetails(descriptor).location());
- if (location == kField) {
+ if (location == PropertyLocation::kField) {
return handle(GetFieldType(descriptor), isolate_);
} else {
return GetValue(descriptor).OptimalType(isolate_, representation);
@@ -165,7 +165,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
PropertyLocation location, Representation representation) {
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
- if (location == kField) {
+ if (location == PropertyLocation::kField) {
return handle(descriptors->GetFieldType(descriptor), isolate_);
} else {
return descriptors->GetStrongValue(descriptor)
@@ -188,7 +188,7 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
modified_descriptor_ = descriptor;
new_kind_ = kData;
new_attributes_ = attributes;
- new_location_ = kField;
+ new_location_ = PropertyLocation::kField;
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
@@ -460,7 +460,7 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
DCHECK_EQ(new_kind_, old_details.kind());
DCHECK_EQ(new_attributes_, old_details.attributes());
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
if (FLAG_trace_generalization) {
PrintGeneralization(
isolate_, old_map_, stdout, "uninitialized field", modified_descriptor_,
@@ -581,7 +581,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
old_details.attributes() != new_attributes_) {
return Normalize("Normalize_RootModification1");
}
- if (old_details.location() != kField) {
+ if (old_details.location() != PropertyLocation::kField) {
return Normalize("Normalize_RootModification2");
}
if (!new_representation_.fits_into(old_details.representation())) {
@@ -590,7 +590,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
DCHECK_EQ(kData, old_details.kind());
DCHECK_EQ(kData, new_kind_);
- DCHECK_EQ(kField, new_location_);
+ DCHECK_EQ(PropertyLocation::kField, new_location_);
// Modify root map in-place. The GeneralizeField method is a no-op
// if the {old_map_} is already general enough to hold the requested
@@ -645,7 +645,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
tmp_representation = generalized;
}
- if (tmp_details.location() == kField) {
+ if (tmp_details.location() == PropertyLocation::kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), tmp_representation);
GeneralizeField(tmp_map, i, old_details.constness(), tmp_representation,
@@ -676,12 +676,12 @@ MapUpdater::State MapUpdater::FindTargetMap() {
DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
DCHECK_EQ(new_location_, details.location());
DCHECK(new_representation_.fits_into(details.representation()));
- if (new_location_ == kField) {
- DCHECK_EQ(kField, details.location());
+ if (new_location_ == PropertyLocation::kField) {
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK(new_field_type_->NowIs(
target_descriptors.GetFieldType(modified_descriptor_)));
} else {
- DCHECK(details.location() == kField ||
+ DCHECK(details.location() == PropertyLocation::kField ||
EqualImmutableValues(
*new_value_,
target_descriptors.GetStrongValue(modified_descriptor_)));
@@ -766,7 +766,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
int current_offset = 0;
for (InternalIndex i : InternalIndex::Range(root_nof)) {
PropertyDetails old_details = old_descriptors_->GetDetails(i);
- if (old_details.location() == kField) {
+ if (old_details.location() == PropertyLocation::kField) {
current_offset += old_details.field_width_in_words();
}
Descriptor d(handle(GetKey(i), isolate_),
@@ -793,22 +793,22 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// Note: failed values equality check does not invalidate per-object
// property constness.
PropertyLocation next_location =
- old_details.location() == kField ||
- target_details.location() == kField ||
+ old_details.location() == PropertyLocation::kField ||
+ target_details.location() == PropertyLocation::kField ||
!EqualImmutableValues(target_descriptors->GetStrongValue(i),
GetValue(i))
- ? kField
- : kDescriptor;
+ ? PropertyLocation::kField
+ : PropertyLocation::kDescriptor;
// Ensure that mutable values are stored in fields.
DCHECK_IMPLIES(next_constness == PropertyConstness::kMutable,
- next_location == kField);
+ next_location == PropertyLocation::kField);
Representation next_representation =
old_details.representation().generalize(
target_details.representation());
- if (next_location == kField) {
+ if (next_location == PropertyLocation::kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -837,7 +837,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
- DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyLocation::kDescriptor, next_location);
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
@@ -860,7 +860,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Representation next_representation = old_details.representation();
Descriptor d;
- if (next_location == kField) {
+ if (next_location == PropertyLocation::kField) {
Handle<FieldType> next_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -885,7 +885,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
- DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyLocation::kDescriptor, next_location);
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
@@ -924,7 +924,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
if (details.location() != next_details.location()) break;
if (!details.representation().Equals(next_details.representation())) break;
- if (next_details.location() == kField) {
+ if (next_details.location() == PropertyLocation::kField) {
FieldType next_field_type = next_descriptors.GetFieldType(i);
if (!descriptors->GetFieldType(i).NowIs(next_field_type)) {
break;
@@ -981,14 +981,14 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
MaybeHandle<FieldType> new_field_type;
MaybeHandle<Object> old_value;
MaybeHandle<Object> new_value;
- if (old_details.location() == kField) {
+ if (old_details.location() == PropertyLocation::kField) {
old_field_type = handle(
old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
} else {
old_value = handle(old_descriptors_->GetStrongValue(modified_descriptor_),
isolate_);
}
- if (new_details.location() == kField) {
+ if (new_details.location() == PropertyLocation::kField) {
new_field_type =
handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
} else {
@@ -999,7 +999,8 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
PrintGeneralization(
isolate_, old_map_, stdout, "", modified_descriptor_, split_nof,
old_nof_,
- old_details.location() == kDescriptor && new_location_ == kField,
+ old_details.location() == PropertyLocation::kDescriptor &&
+ new_location_ == PropertyLocation::kField,
old_details.representation(), new_details.representation(),
old_details.constness(), new_details.constness(), old_field_type,
old_value, new_field_type, new_value);
@@ -1099,7 +1100,7 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
DisallowGarbageCollection no_gc;
PropertyDetails details =
map->instance_descriptors(isolate).GetDetails(descriptor);
- if (details.location() != kField) return;
+ if (details.location() != PropertyLocation::kField) return;
DCHECK_EQ(kData, details.kind());
if (new_constness != details.constness() && map->is_prototype_map()) {
diff --git a/chromium/v8/src/objects/map-updater.h b/chromium/v8/src/objects/map-updater.h
index c5b425764a6..6f022e1d39f 100644
--- a/chromium/v8/src/objects/map-updater.h
+++ b/chromium/v8/src/objects/map-updater.h
@@ -230,7 +230,7 @@ class V8_EXPORT_PRIVATE MapUpdater {
PropertyKind new_kind_ = kData;
PropertyAttributes new_attributes_ = NONE;
PropertyConstness new_constness_ = PropertyConstness::kMutable;
- PropertyLocation new_location_ = kField;
+ PropertyLocation new_location_ = PropertyLocation::kField;
Representation new_representation_ = Representation::None();
// Data specific to kField location.
diff --git a/chromium/v8/src/objects/map.cc b/chromium/v8/src/objects/map.cc
index a8fdce3189f..0610e596880 100644
--- a/chromium/v8/src/objects/map.cc
+++ b/chromium/v8/src/objects/map.cc
@@ -25,7 +25,6 @@
#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/field-offsets.h"
namespace v8 {
namespace internal {
@@ -206,6 +205,7 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitJSDataView;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -502,7 +502,8 @@ int Map::NumberOfFields(ConcurrencyMode cmode) const {
: instance_descriptors();
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
- if (descriptors.GetDetails(i).location() == kField) result++;
+ if (descriptors.GetDetails(i).location() == PropertyLocation::kField)
+ result++;
}
return result;
}
@@ -513,7 +514,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
int const_count = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
switch (details.constness()) {
case PropertyConstness::kMutable:
mutable_count++;
@@ -599,9 +600,10 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
DisallowGarbageCollection no_gc;
- DCHECK_EQ(kField, instance_descriptors(isolate, kRelaxedLoad)
- .GetDetails(descriptor)
- .location());
+ DCHECK_EQ(PropertyLocation::kField,
+ instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor)
+ .location());
Map result = *this;
while (true) {
Object back = result.GetBackPointer(isolate);
@@ -635,7 +637,8 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
DescriptorArray old_descriptors = old_map.instance_descriptors(isolate);
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
- if (old_details.location() == kField && old_details.kind() == kData) {
+ if (old_details.location() == PropertyLocation::kField &&
+ old_details.kind() == kData) {
FieldType old_type = old_descriptors.GetFieldType(i);
if (Map::FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
@@ -708,7 +711,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
if (!old_details.representation().fits_into(new_details.representation())) {
return Map();
}
- if (new_details.location() == kField) {
+ if (new_details.location() == PropertyLocation::kField) {
if (new_details.kind() == kData) {
FieldType new_type = new_descriptors.GetFieldType(i);
// Cleared field types need special treatment. They represent lost
@@ -717,7 +720,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
return Map();
}
DCHECK_EQ(kData, old_details.kind());
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
FieldType old_type = old_descriptors.GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type.NowIs(new_type)) {
@@ -732,8 +735,8 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
UNREACHABLE();
}
} else {
- DCHECK_EQ(kDescriptor, new_details.location());
- if (old_details.location() == kField ||
+ DCHECK_EQ(PropertyLocation::kDescriptor, new_details.location());
+ if (old_details.location() == PropertyLocation::kField ||
old_descriptors.GetStrongValue(i) !=
new_descriptors.GetStrongValue(i)) {
return Map();
@@ -1061,7 +1064,7 @@ int Map::NextFreePropertyIndex() const {
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
return details.field_index() + details.field_width_in_words();
}
}
@@ -1479,7 +1482,7 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
new_descriptor.as_int() + 1);
child->CopyUnusedPropertyFields(*parent);
PropertyDetails details = descriptors->GetDetails(new_descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
child->AccountAddedPropertyField();
}
@@ -1712,7 +1715,7 @@ namespace {
bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
PropertyConstness constness, Object value) {
PropertyDetails details = descriptors.GetDetails(descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
return IsGeneralizableTo(constness, details.constness()) &&
value.FitsRepresentation(details.representation()) &&
@@ -1723,7 +1726,7 @@ bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(PropertyConstness::kConst, details.constness());
DCHECK_EQ(kAccessor, details.kind());
return false;
@@ -2009,8 +2012,9 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
// This function does not support replacing property fields as
// that would break property field counters.
- DCHECK_NE(kField, descriptor->GetDetails().location());
- DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
+ DCHECK_NE(PropertyLocation::kField, descriptor->GetDetails().location());
+ DCHECK_NE(PropertyLocation::kField,
+ descriptors->GetDetails(insertion_index).location());
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, map->NumberOfOwnDescriptors());
@@ -2089,7 +2093,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other,
: instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK(IsMostGeneralFieldType(details.representation(),
descriptors.GetFieldType(i)));
}
diff --git a/chromium/v8/src/objects/map.h b/chromium/v8/src/objects/map.h
index 74d2a859e81..d60890d9103 100644
--- a/chromium/v8/src/objects/map.h
+++ b/chromium/v8/src/objects/map.h
@@ -12,7 +12,7 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "torque-generated/bit-fields.h"
-#include "torque-generated/field-offsets.h"
+#include "torque-generated/visitor-lists.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -565,6 +565,7 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
// The field also overlaps with the native context pointer for context maps,
// and with the Wasm type info for WebAssembly object maps.
DECL_ACCESSORS(constructor_or_back_pointer, Object)
+ DECL_RELAXED_ACCESSORS(constructor_or_back_pointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(native_context_or_null, Object)
DECL_ACCESSORS(wasm_type_info, WasmTypeInfo)
@@ -850,6 +851,12 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
InstanceType instance_type);
inline bool CanHaveFastTransitionableElementsKind() const;
+ // Maps for Wasm objects can use certain fields for other purposes.
+ inline uint8_t WasmByte1() const;
+ inline uint8_t WasmByte2() const;
+ inline void SetWasmByte1(uint8_t value);
+ inline void SetWasmByte2(uint8_t value);
+
private:
// This byte encodes either the instance size without the in-object slack or
// the slack size in properties backing store.
diff --git a/chromium/v8/src/objects/megadom-handler.tq b/chromium/v8/src/objects/megadom-handler.tq
index abcfa583a56..2a76e7045cd 100644
--- a/chromium/v8/src/objects/megadom-handler.tq
+++ b/chromium/v8/src/objects/megadom-handler.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
@generateBodyDescriptor
extern class MegaDomHandler extends HeapObject {
accessor: MaybeObject;
diff --git a/chromium/v8/src/objects/microtask.h b/chromium/v8/src/objects/microtask.h
index f2869eadc73..8b1446373c8 100644
--- a/chromium/v8/src/objects/microtask.h
+++ b/chromium/v8/src/objects/microtask.h
@@ -30,9 +30,6 @@ class Microtask : public TorqueGeneratedMicrotask<Microtask, Struct> {
class CallbackTask
: public TorqueGeneratedCallbackTask<CallbackTask, Microtask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(CallbackTask)
-
TQ_OBJECT_CONSTRUCTORS(CallbackTask)
};
@@ -43,7 +40,6 @@ class CallableTask
: public TorqueGeneratedCallableTask<CallableTask, Microtask> {
public:
// Dispatched behavior.
- DECL_PRINTER(CallableTask)
DECL_VERIFIER(CallableTask)
void BriefPrintDetails(std::ostream& os);
diff --git a/chromium/v8/src/objects/module.cc b/chromium/v8/src/objects/module.cc
index 2945f36a14a..110d67c8882 100644
--- a/chromium/v8/src/objects/module.cc
+++ b/chromium/v8/src/objects/module.cc
@@ -17,6 +17,7 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/source-text-module.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
@@ -427,6 +428,44 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
return Just(it->property_attributes());
}
+// ES
+// https://tc39.es/ecma262/#sec-module-namespace-exotic-objects-defineownproperty-p-desc
+// static
+Maybe<bool> JSModuleNamespace::DefineOwnProperty(
+ Isolate* isolate, Handle<JSModuleNamespace> object, Handle<Object> key,
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw) {
+ // 1. If Type(P) is Symbol, return OrdinaryDefineOwnProperty(O, P, Desc).
+ if (key->IsSymbol()) {
+ return OrdinaryDefineOwnProperty(isolate, object, key, desc, should_throw);
+ }
+
+ // 2. Let current be ? O.[[GetOwnProperty]](P).
+ PropertyKey lookup_key(isolate, key);
+ LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
+ PropertyDescriptor current;
+ Maybe<bool> has_own = GetOwnPropertyDescriptor(&it, &current);
+ MAYBE_RETURN(has_own, Nothing<bool>());
+
+ // 3. If current is undefined, return false.
+ // 4. If Desc.[[Configurable]] is present and has value true, return false.
+ // 5. If Desc.[[Enumerable]] is present and has value false, return false.
+ // 6. If ! IsAccessorDescriptor(Desc) is true, return false.
+ // 7. If Desc.[[Writable]] is present and has value false, return false.
+ // 8. If Desc.[[Value]] is present, return
+ // SameValue(Desc.[[Value]], current.[[Value]]).
+ if (!has_own.FromJust() ||
+ (desc->has_configurable() && desc->configurable()) ||
+ (desc->has_enumerable() && !desc->enumerable()) ||
+ PropertyDescriptor::IsAccessorDescriptor(desc) ||
+ (desc->has_writable() && !desc->writable()) ||
+ (desc->has_value() && !desc->value()->SameValue(*current.value()))) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+
+ return Just(true);
+}
+
bool Module::IsGraphAsync(Isolate* isolate) const {
DisallowGarbageCollection no_gc;
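The new JSModuleNamespace::DefineOwnProperty collapses spec steps 3-8 into a single condition: any descriptor that would alter an existing export binding is rejected, and only an exact match of the current data property succeeds. The same logic restated as a small standalone function (int stands in for a JS value; all names are illustrative):

#include <optional>

// Every field is optional because a property descriptor may leave any
// attribute unspecified.
struct Descriptor {
  std::optional<bool> configurable;
  std::optional<bool> enumerable;
  std::optional<bool> writable;
  bool is_accessor = false;
  std::optional<int> value;
};

bool CanRedefineModuleExport(bool has_own, int current_value,
                             const Descriptor& desc) {
  if (!has_own) return false;                                    // step 3
  if (desc.configurable && *desc.configurable) return false;     // step 4
  if (desc.enumerable && !*desc.enumerable) return false;        // step 5
  if (desc.is_accessor) return false;                            // step 6
  if (desc.writable && !*desc.writable) return false;            // step 7
  if (desc.value && *desc.value != current_value) return false;  // step 8
  return true;
}

int main() {
  Descriptor same_value;
  same_value.value = 42;
  return CanRedefineModuleExport(true, 42, same_value) ? 0 : 1;
}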
diff --git a/chromium/v8/src/objects/module.h b/chromium/v8/src/objects/module.h
index 05ea04ccd97..208613e4c96 100644
--- a/chromium/v8/src/objects/module.h
+++ b/chromium/v8/src/objects/module.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_MODULE_H_
#define V8_OBJECTS_MODULE_H_
+#include "include/v8-script.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -159,6 +159,10 @@ class JSModuleNamespace
static V8_WARN_UNUSED_RESULT Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
+ static V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSModuleNamespace> o, Handle<Object> key,
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
+
// In-object fields.
enum {
kToStringTagFieldIndex,
diff --git a/chromium/v8/src/objects/name.tq b/chromium/v8/src/objects/name.tq
index 55f70d26b58..6fe141f90c8 100644
--- a/chromium/v8/src/objects/name.tq
+++ b/chromium/v8/src/objects/name.tq
@@ -64,7 +64,7 @@ const kArrayIndexLengthBitsShift: uint32 =
kNofHashBitFields + kArrayIndexValueBits;
macro TenToThe(exponent: uint32): uint32 {
- assert(exponent <= 9);
+ dcheck(exponent <= 9);
let answer: int32 = 1;
for (let i: int32 = 0; i < Signed(exponent); i++) {
answer = answer * 10;
@@ -74,14 +74,14 @@ macro TenToThe(exponent: uint32): uint32 {
macro MakeArrayIndexHash(value: uint32, length: uint32): NameHash {
// This is in sync with StringHasher::MakeArrayIndexHash.
- assert(length <= kMaxArrayIndexSize);
+ dcheck(length <= kMaxArrayIndexSize);
const one: uint32 = 1;
- assert(TenToThe(kMaxCachedArrayIndexLength) < (one << kArrayIndexValueBits));
+ dcheck(TenToThe(kMaxCachedArrayIndexLength) < (one << kArrayIndexValueBits));
let hash: uint32 = value;
hash = (hash << kArrayIndexValueBitsShift) |
(length << kArrayIndexLengthBitsShift);
- assert((hash & kIsNotIntegerIndexMask) == 0);
- assert(
+ dcheck((hash & kIsNotIntegerIndexMask) == 0);
+ dcheck(
(length <= kMaxCachedArrayIndexLength) == ContainsCachedArrayIndex(hash));
return %RawDownCast<NameHash>(hash);
}
diff --git a/chromium/v8/src/objects/object-list-macros.h b/chromium/v8/src/objects/object-list-macros.h
index e5ba2684b23..51dc178f8b8 100644
--- a/chromium/v8/src/objects/object-list-macros.h
+++ b/chromium/v8/src/objects/object-list-macros.h
@@ -270,6 +270,7 @@ class ZoneForwardList;
V(FreeSpaceOrFiller) \
V(FunctionContext) \
V(JSApiObject) \
+ V(JSClassConstructor) \
V(JSLastDummyApiObject) \
V(JSPromiseConstructor) \
V(JSArrayConstructor) \
diff --git a/chromium/v8/src/objects/object-macros-undef.h b/chromium/v8/src/objects/object-macros-undef.h
index 1aa9dc10b4f..f531ab0aa57 100644
--- a/chromium/v8/src/objects/object-macros-undef.h
+++ b/chromium/v8/src/objects/object-macros-undef.h
@@ -8,7 +8,6 @@
#undef OBJECT_CONSTRUCTORS
#undef OBJECT_CONSTRUCTORS_IMPL
-#undef OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_GETTER
@@ -40,7 +39,6 @@
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef INT32_ACCESSORS
-#undef IMPLICIT_TAG_RELAXED_INT32_ACCESSORS
#undef RELAXED_INT32_ACCESSORS
#undef UINT16_ACCESSORS
#undef UINT8_ACCESSORS
diff --git a/chromium/v8/src/objects/object-macros.h b/chromium/v8/src/objects/object-macros.h
index 561b1de30b2..79cc79033ea 100644
--- a/chromium/v8/src/objects/object-macros.h
+++ b/chromium/v8/src/objects/object-macros.h
@@ -30,11 +30,6 @@
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
-// In these cases, we don't have our own instance type to check, so check the
-// supertype instead. This happens for types denoting a NativeContext-dependent
-// set of maps.
-#define OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(Type, Super) \
- inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Super()); }
#define NEVER_READ_ONLY_SPACE \
inline Heap* GetHeap() const; \
@@ -163,15 +158,6 @@
int32_t holder::name() const { return ReadField<int32_t>(offset); } \
void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
-// TODO(solanes): Use the non-implicit one, and change the uses to use the tag.
-#define IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { \
- return RELAXED_READ_INT32_FIELD(*this, offset); \
- } \
- void holder::set_##name(int32_t value) { \
- RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
- }
-
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name(RelaxedLoadTag) const { \
return RELAXED_READ_INT32_FIELD(*this, offset); \
diff --git a/chromium/v8/src/objects/objects-body-descriptors-inl.h b/chromium/v8/src/objects/objects-body-descriptors-inl.h
index 7750b265750..44a11accdb9 100644
--- a/chromium/v8/src/objects/objects-body-descriptors-inl.h
+++ b/chromium/v8/src/objects/objects-body-descriptors-inl.h
@@ -727,7 +727,7 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(object).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(object).length());
}
};
@@ -800,8 +800,8 @@ class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
- kDeoptimizationDataOffset);
- STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
+ kDeoptimizationDataOrInterpreterDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOrInterpreterDataOffset + kTaggedSize ==
kPositionTableOffset);
STATIC_ASSERT(kPositionTableOffset + kTaggedSize == kCodeDataContainerOffset);
STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
@@ -931,7 +931,7 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
CodeDataContainer::kPointerFieldsWeakEndOffset, v);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- v->VisitCodePointer(obj, obj.RawField(kCodeOffset));
+ v->VisitCodePointer(obj, obj.RawCodeField(kCodeOffset));
}
}
@@ -1114,6 +1114,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
diff --git a/chromium/v8/src/objects/objects-definitions.h b/chromium/v8/src/objects/objects-definitions.h
index 20ce96aae5d..f70a4693647 100644
--- a/chromium/v8/src/objects/objects-definitions.h
+++ b/chromium/v8/src/objects/objects-definitions.h
@@ -124,7 +124,6 @@ namespace internal {
IF_WASM(V, _, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
- V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
diff --git a/chromium/v8/src/objects/objects-inl.h b/chromium/v8/src/objects/objects-inl.h
index 6800db3b789..92452e43b0d 100644
--- a/chromium/v8/src/objects/objects-inl.h
+++ b/chromium/v8/src/objects/objects-inl.h
@@ -18,6 +18,7 @@
#include "src/builtins/builtins.h"
#include "src/common/external-pointer-inl.h"
#include "src/common/globals.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -59,7 +60,7 @@ Smi PropertyDetails::AsSmi() const {
}
int PropertyDetails::field_width_in_words() const {
- DCHECK_EQ(location(), kField);
+ DCHECK_EQ(location(), PropertyLocation::kField);
return 1;
}
@@ -648,6 +649,10 @@ MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
return MaybeObjectSlot(field_address(byte_offset));
}
+CodeObjectSlot HeapObject::RawCodeField(int byte_offset) const {
+ return CodeObjectSlot(field_address(byte_offset));
+}
+
MapWord MapWord::FromMap(const Map map) {
DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
#ifdef V8_MAP_PACKING
@@ -675,6 +680,22 @@ MapWord MapWord::FromForwardingAddress(HeapObject object) {
HeapObject MapWord::ToForwardingAddress() {
DCHECK(IsForwardingAddress());
+ HeapObject obj = HeapObject::FromAddress(value_);
+ // For objects allocated outside of the main pointer compression cage the
+ // variant with explicit cage base must be used.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode());
+ return obj;
+}
+
+HeapObject MapWord::ToForwardingAddress(PtrComprCageBase host_cage_base) {
+ DCHECK(IsForwardingAddress());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ // Recompress value_ using proper host_cage_base since the map word
+ // has the upper 32 bits that correspond to the main cage base value.
+ Address value =
+ DecompressTaggedPointer(host_cage_base, CompressTagged(value_));
+ return HeapObject::FromAddress(value);
+ }
return HeapObject::FromAddress(value_);
}
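The new ToForwardingAddress(PtrComprCageBase) overload re-derives the full pointer from a different cage base: it compresses the stored value down to its low 32 bits and decompresses it against the host cage. A simplified standalone sketch of that compress/recompress step (the constants and helpers here are illustrative, not the real V8 routines):

#include <cstdint>
#include <cstdio>

// Compressed pointers keep only the low 32 bits (the offset within a 4 GB
// "cage"); decompression adds them back onto a cage base address.
using Tagged_t = uint32_t;

Tagged_t CompressTagged(uint64_t full_ptr) {
  return static_cast<Tagged_t>(full_ptr);  // keep the low 32 bits
}

uint64_t DecompressTaggedPointer(uint64_t cage_base, Tagged_t compressed) {
  return (cage_base & ~uint64_t{0xffffffff}) | compressed;
}

int main() {
  const uint64_t main_cage_base = 0x100000000000;
  const uint64_t code_cage_base = 0x200000000000;

  // A full pointer stored relative to the main cage.
  uint64_t stored = main_cage_base + 0x1234;

  // Reinterpreting the same offset against a different cage base is the
  // recompression step the parameterized overload performs.
  uint64_t in_code_cage =
      DecompressTaggedPointer(code_cage_base, CompressTagged(stored));

  std::printf("%llx\n", static_cast<unsigned long long>(in_code_cage));
  // prints 200000001234
}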
diff --git a/chromium/v8/src/objects/objects.cc b/chromium/v8/src/objects/objects.cc
index 2f16615536a..db5a905f9c7 100644
--- a/chromium/v8/src/objects/objects.cc
+++ b/chromium/v8/src/objects/objects.cc
@@ -197,6 +197,8 @@ std::ostream& operator<<(std::ostream& os, PropertyCellType type) {
return os << "ConstantType";
case PropertyCellType::kMutable:
return os << "Mutable";
+ case PropertyCellType::kInTransition:
+ return os << "InTransition";
}
UNREACHABLE();
}
@@ -2291,7 +2293,7 @@ int HeapObject::SizeFromMap(Map map) const {
return WasmStruct::GcSafeSize(map);
}
if (instance_type == WASM_ARRAY_TYPE) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(*this).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(*this).length());
}
#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
@@ -2433,7 +2435,7 @@ void DescriptorArray::GeneralizeAllFields() {
for (InternalIndex i : InternalIndex::Range(length)) {
PropertyDetails details = GetDetails(i);
details = details.CopyWithRepresentation(Representation::Tagged());
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
details = details.CopyWithConstness(PropertyConstness::kMutable);
SetValue(i, MaybeObject::FromObject(FieldType::Any()));
@@ -6532,6 +6534,8 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
V8_FALLTHROUGH;
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
}
@@ -6587,6 +6591,7 @@ bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
Object value) {
DisallowGarbageCollection no_gc;
PropertyCellType cell_type = details.cell_type();
+ CHECK_NE(cell_type, PropertyCellType::kInTransition);
if (value.IsTheHole()) {
CHECK_EQ(cell_type, PropertyCellType::kConstant);
} else {
@@ -6620,8 +6625,9 @@ bool PropertyCell::CanTransitionTo(PropertyDetails new_details,
return new_details.cell_type() == PropertyCellType::kMutable ||
(new_details.cell_type() == PropertyCellType::kConstant &&
new_value.IsTheHole());
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
- UNREACHABLE();
}
#endif // DEBUG
diff --git a/chromium/v8/src/objects/objects.h b/chromium/v8/src/objects/objects.h
index eb31ec957d7..7cb94dfb740 100644
--- a/chromium/v8/src/objects/objects.h
+++ b/chromium/v8/src/objects/objects.h
@@ -9,7 +9,6 @@
#include <memory>
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "include/v8config.h"
#include "src/base/bits.h"
#include "src/base/build_config.h"
@@ -647,7 +646,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
bool operator()(const Object a, const Object b) const { return a < b; }
};
- template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value,
int>::type = 0>
inline T ReadField(size_t offset) const {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
@@ -664,7 +664,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
}
- template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value,
int>::type = 0>
inline void WriteField(size_t offset, T value) const {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
@@ -786,8 +787,14 @@ class MapWord {
// Create a map word from a forwarding address.
static inline MapWord FromForwardingAddress(HeapObject object);
- // View this map word as a forwarding address.
+ // View this map word as a forwarding address. The parameterless version
+ // is allowed to be used for objects allocated in the main pointer compression
+ // cage, while the second variant uses the value of the cage base explicitly
+ // and thus can be used in situations where one has to deal with both cases.
+ // Note, that the parameterless version is preferred because it avoids
+  // Note that the parameterless version is preferred because it avoids
+ // unnecessary recompressions.
inline HeapObject ToForwardingAddress();
+ inline HeapObject ToForwardingAddress(PtrComprCageBase host_cage_base);
inline Address ptr() { return value_; }
@@ -840,18 +847,6 @@ enum EnsureElementsMode {
// Indicator for one component of an AccessorPair.
enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
-enum class GetKeysConversion {
- kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
- kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
- kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
-};
-
-enum class KeyCollectionMode {
- kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
- kIncludePrototypes =
- static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
-};
-
// Utility superclass for stack-allocated objects that must be updated
// on gc. It provides two ways for the gc to update instances, either
// iterating or updating after gc.
diff --git a/chromium/v8/src/objects/option-utils.cc b/chromium/v8/src/objects/option-utils.cc
new file mode 100644
index 00000000000..9e05b4a1044
--- /dev/null
+++ b/chromium/v8/src/objects/option-utils.cc
@@ -0,0 +1,172 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/option-utils.h"
+
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// ecma402/#sec-getoptionsobject
+MaybeHandle<JSReceiver> GetOptionsObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* method_name) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. If Type(options) is Object, then
+ if (options->IsJSReceiver()) {
+ // a. Return options.
+ return Handle<JSReceiver>::cast(options);
+ }
+ // 3. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
+ JSReceiver);
+}
+
+// ecma402/#sec-coerceoptionstoobject
+MaybeHandle<JSReceiver> CoerceOptionsToObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* method_name) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. Return ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, options, method_name),
+ JSReceiver);
+ return Handle<JSReceiver>::cast(options);
+}
+
+Maybe<bool> GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
+ const char* property,
+ std::vector<const char*> values,
+ const char* method_name,
+ std::unique_ptr<char[]>* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ if (value->IsUndefined(isolate)) {
+ return Just(false);
+ }
+
+ // 2. c. Let value be ? ToString(value).
+ Handle<String> value_str;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_str, Object::ToString(isolate, value), Nothing<bool>());
+ std::unique_ptr<char[]> value_cstr = value_str->ToCString();
+
+ // 2. d. if values is not undefined, then
+ if (values.size() > 0) {
+ // 2. d. i. If values does not contain an element equal to value,
+ // throw a RangeError exception.
+ for (size_t i = 0; i < values.size(); i++) {
+ if (strcmp(values.at(i), value_cstr.get()) == 0) {
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+ }
+ }
+
+ Handle<String> method_str =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kValueOutOfRange, value, method_str,
+ property_str),
+ Nothing<bool>());
+ }
+
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+}
+
+V8_WARN_UNUSED_RESULT Maybe<bool> GetBoolOption(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* property,
+ const char* method_name,
+ bool* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ // 2. If value is not undefined, then
+ if (!value->IsUndefined(isolate)) {
+ // 2. b. i. Let value be ToBoolean(value).
+ *result = value->BooleanValue(isolate);
+
+ // 2. e. return value
+ return Just(true);
+ }
+
+ return Just(false);
+}
+
+// ecma402/#sec-defaultnumberoption
+Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
+ int max, int fallback, Handle<String> property) {
+ // 2. Else, return fallback.
+ if (value->IsUndefined()) return Just(fallback);
+
+ // 1. If value is not undefined, then
+ // a. Let value be ? ToNumber(value).
+ Handle<Object> value_num;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_num, Object::ToNumber(isolate, value), Nothing<int>());
+ DCHECK(value_num->IsNumber());
+
+ // b. If value is NaN or less than minimum or greater than maximum, throw a
+ // RangeError exception.
+ if (value_num->IsNaN() || value_num->Number() < min ||
+ value_num->Number() > max) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange, property),
+ Nothing<int>());
+ }
+
+ // The max and min arguments are integers and the above check makes
+ // sure that we are within the integer range making this double to
+ // int conversion safe.
+ //
+ // c. Return floor(value).
+ return Just(FastD2I(floor(value_num->Number())));
+}
+
+// ecma402/#sec-getnumberoption
+Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, JSReceiver::GetProperty(isolate, options, property),
+ Nothing<int>());
+
+ // Return ? DefaultNumberOption(value, minimum, maximum, fallback).
+ return DefaultNumberOption(isolate, value, min, max, fallback, property);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/objects/option-utils.h b/chromium/v8/src/objects/option-utils.h
new file mode 100644
index 00000000000..5bb2c35701a
--- /dev/null
+++ b/chromium/v8/src/objects/option-utils.h
@@ -0,0 +1,95 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OPTION_UTILS_H_
+#define V8_OBJECTS_OPTION_UTILS_H_
+
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// ecma402/#sec-getoptionsobject and temporal/#sec-getoptionsobject
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> GetOptionsObject(
+ Isolate* isolate, Handle<Object> options, const char* method_name);
+
+// ecma402/#sec-coerceoptionstoobject
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> CoerceOptionsToObject(
+ Isolate* isolate, Handle<Object> options, const char* method_name);
+
+// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+// ecma402/#sec-getoption and temporal/#sec-getoption
+//
+// This is specialized for the case when type is string.
+//
+// Instead of passing undefined for the values argument as the spec
+// defines, pass in an empty vector.
+//
+// Returns true if the options object has the property and stores the
+// result in *result. Returns false if the value is not found. The
+// caller is required to use the fallback value appropriately in this
+// case.
+//
+// method_name is a string denoting the method the call came from; used when
+// printing the error message.
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<bool> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ std::vector<const char*> values, const char* method_name,
+ std::unique_ptr<char[]>* result);
+
+// A helper template to get a string option and map it to an enum.
+// Each entry in enum_values is the value corresponding to the string at
+// the same index in str_values. If the option does not contain name,
+// default_value will be returned.
+template <typename T>
+V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* name,
+ const char* method_name, const std::vector<const char*>& str_values,
+ const std::vector<T>& enum_values, T default_value) {
+ DCHECK_EQ(str_values.size(), enum_values.size());
+ std::unique_ptr<char[]> cstr;
+ Maybe<bool> found =
+ GetStringOption(isolate, options, name, str_values, method_name, &cstr);
+ MAYBE_RETURN(found, Nothing<T>());
+ if (found.FromJust()) {
+ DCHECK_NOT_NULL(cstr.get());
+ for (size_t i = 0; i < str_values.size(); i++) {
+ if (strcmp(cstr.get(), str_values[i]) == 0) {
+ return Just(enum_values[i]);
+ }
+ }
+ UNREACHABLE();
+ }
+ return Just(default_value);
+}
+
+// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+// ecma402/#sec-getoption
+//
+// This is specialized for the case when type is boolean.
+//
+// Returns true if the options object has the property and stores the
+// result in *result. Returns false if the value is not found. The
+// caller is required to use the fallback value appropriately in this
+// case.
+//
+// method_name is a string denoting the method it is called from; used when
+// printing the error message.
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<bool> GetBoolOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ const char* method_name, bool* result);
+
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
+ int min, int max, int fallback);
+
+// ecma402/#sec-defaultnumberoption
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<int> DefaultNumberOption(
+ Isolate* isolate, Handle<Object> value, int min, int max, int fallback,
+ Handle<String> property);
+
+} // namespace internal
+} // namespace v8
+#endif // V8_OBJECTS_OPTION_UTILS_H_
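
For readers skimming the new option-utils header above, a minimal usage sketch follows. It is illustrative only and not part of the patch: the Notation enum, the property names, and the method name are hypothetical, and the snippet assumes it lives in the v8::internal namespace with option-utils.h included, mirroring how the Intl code consumes GetStringOption and GetNumberOption.

// Sketch only -- hypothetical caller of the option-utils helpers declared
// above; the Notation enum and the option/property names are made up.
enum class Notation { kStandard, kCompact };

Maybe<Notation> ReadNotationOption(Isolate* isolate,
                                   Handle<JSReceiver> options) {
  // Maps "standard"/"compact" onto Notation and falls back to kStandard
  // when the property is absent; any other string throws a RangeError.
  return GetStringOption<Notation>(
      isolate, options, "notation", "Intl.NumberFormat",
      {"standard", "compact"}, {Notation::kStandard, Notation::kCompact},
      Notation::kStandard);
}

Maybe<int> ReadFractionDigitsOption(Isolate* isolate,
                                    Handle<JSReceiver> options) {
  // Reads an integer option constrained to [0, 20], defaulting to 3 when
  // absent; out-of-range values throw a RangeError via DefaultNumberOption.
  Handle<String> key = isolate->factory()->NewStringFromAsciiChecked(
      "maximumFractionDigits");
  return GetNumberOption(isolate, options, key, 0, 20, 3);
}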
diff --git a/chromium/v8/src/objects/ordered-hash-table.h b/chromium/v8/src/objects/ordered-hash-table.h
index 1110352e46b..45682e45e90 100644
--- a/chromium/v8/src/objects/ordered-hash-table.h
+++ b/chromium/v8/src/objects/ordered-hash-table.h
@@ -10,6 +10,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/internal-index.h"
#include "src/objects/js-objects.h"
+#include "src/objects/keys.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
diff --git a/chromium/v8/src/objects/ordered-hash-table.tq b/chromium/v8/src/objects/ordered-hash-table.tq
index 82d49b27bc2..b37b03e8503 100644
--- a/chromium/v8/src/objects/ordered-hash-table.tq
+++ b/chromium/v8/src/objects/ordered-hash-table.tq
@@ -14,7 +14,6 @@ const kSmallOrderedHashTableNotFound: constexpr int31
const kSmallOrderedHashTableLoadFactor: constexpr int31
generates 'SmallOrderedHashTable<int>::kLoadFactor';
-@noVerifier
@abstract
@doNotGenerateCppClass
extern class SmallOrderedHashTable extends HeapObject
@@ -41,7 +40,7 @@ extern class SmallOrderedHashSet extends SmallOrderedHashTable {
@export
macro AllocateSmallOrderedHashSet(capacity: intptr): SmallOrderedHashSet {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
- assert(
+ dcheck(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashSet{
map: kSmallOrderedHashSetMap,
@@ -80,7 +79,7 @@ extern class SmallOrderedHashMap extends SmallOrderedHashTable {
@export
macro AllocateSmallOrderedHashMap(capacity: intptr): SmallOrderedHashMap {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
- assert(
+ dcheck(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashMap{
map: kSmallOrderedHashMapMap,
diff --git a/chromium/v8/src/objects/promise.h b/chromium/v8/src/objects/promise.h
index 497498c1669..075afbeebcb 100644
--- a/chromium/v8/src/objects/promise.h
+++ b/chromium/v8/src/objects/promise.h
@@ -39,9 +39,6 @@ class PromiseFulfillReactionJobTask
: public TorqueGeneratedPromiseFulfillReactionJobTask<
PromiseFulfillReactionJobTask, PromiseReactionJobTask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseFulfillReactionJobTask)
-
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
TQ_OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask)
@@ -52,9 +49,6 @@ class PromiseRejectReactionJobTask
: public TorqueGeneratedPromiseRejectReactionJobTask<
PromiseRejectReactionJobTask, PromiseReactionJobTask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseRejectReactionJobTask)
-
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
TQ_OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask)
@@ -65,9 +59,6 @@ class PromiseResolveThenableJobTask
: public TorqueGeneratedPromiseResolveThenableJobTask<
PromiseResolveThenableJobTask, Microtask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseResolveThenableJobTask)
-
TQ_OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask)
};
@@ -75,9 +66,6 @@ class PromiseResolveThenableJobTask
class PromiseCapability
: public TorqueGeneratedPromiseCapability<PromiseCapability, Struct> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseCapability)
-
TQ_OBJECT_CONSTRUCTORS(PromiseCapability)
};
@@ -103,9 +91,6 @@ class PromiseReaction
public:
enum Type { kFulfill, kReject };
- // Dispatched behavior.
- DECL_PRINTER(PromiseReaction)
-
TQ_OBJECT_CONSTRUCTORS(PromiseReaction)
};
diff --git a/chromium/v8/src/objects/property-array.h b/chromium/v8/src/objects/property-array.h
index 52242c87c9f..03c2ccd0059 100644
--- a/chromium/v8/src/objects/property-array.h
+++ b/chromium/v8/src/objects/property-array.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PROPERTY_ARRAY_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/chromium/v8/src/objects/property-cell-inl.h b/chromium/v8/src/objects/property-cell-inl.h
index dfaaf1c80a0..ef4fa754631 100644
--- a/chromium/v8/src/objects/property-cell-inl.h
+++ b/chromium/v8/src/objects/property-cell-inl.h
@@ -57,6 +57,9 @@ void PropertyCell::Transition(PropertyDetails new_details,
DCHECK(CanTransitionTo(new_details, *new_value));
// This code must be in sync with its counterpart in
// PropertyCellData::Serialize.
+ PropertyDetails transition_marker = new_details;
+ transition_marker.set_cell_type(PropertyCellType::kInTransition);
+ set_property_details_raw(transition_marker.AsSmi(), kReleaseStore);
set_value(*new_value, kReleaseStore);
set_property_details_raw(new_details.AsSmi(), kReleaseStore);
}
diff --git a/chromium/v8/src/objects/property-cell.h b/chromium/v8/src/objects/property-cell.h
index 38a83f590de..a85bc1e4dfc 100644
--- a/chromium/v8/src/objects/property-cell.h
+++ b/chromium/v8/src/objects/property-cell.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PROPERTY_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/chromium/v8/src/objects/property-descriptor-object.tq b/chromium/v8/src/objects/property-descriptor-object.tq
index 3f0acdd6892..256903e815c 100644
--- a/chromium/v8/src/objects/property-descriptor-object.tq
+++ b/chromium/v8/src/objects/property-descriptor-object.tq
@@ -16,7 +16,6 @@ bitfield struct PropertyDescriptorObjectFlags extends uint31 {
has_set: bool: 1 bit;
}
-@generatePrint
extern class PropertyDescriptorObject extends Struct {
flags: SmiTagged<PropertyDescriptorObjectFlags>;
value: JSAny|TheHole;
diff --git a/chromium/v8/src/objects/property-descriptor.cc b/chromium/v8/src/objects/property-descriptor.cc
index cde66262cf3..e33759f6f77 100644
--- a/chromium/v8/src/objects/property-descriptor.cc
+++ b/chromium/v8/src/objects/property-descriptor.cc
@@ -61,7 +61,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
details.representation(),
@@ -73,7 +73,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
if (details.kind() == kData) {
value = handle(descs->GetStrongValue(i), isolate);
} else {
diff --git a/chromium/v8/src/objects/property-descriptor.h b/chromium/v8/src/objects/property-descriptor.h
index 22fb1d6ff8c..8950a9d227c 100644
--- a/chromium/v8/src/objects/property-descriptor.h
+++ b/chromium/v8/src/objects/property-descriptor.h
@@ -122,10 +122,6 @@ class PropertyDescriptor {
Handle<Object> get_;
Handle<Object> set_;
Handle<Object> name_;
-
- // Some compilers (Xcode 5.1, ARM GCC 4.9) insist on having a copy
- // constructor for std::vector<PropertyDescriptor>, so we can't
- // DISALLOW_COPY_AND_ASSIGN(PropertyDescriptor); here.
};
} // namespace internal
diff --git a/chromium/v8/src/objects/property-details.h b/chromium/v8/src/objects/property-details.h
index 58cc2359cb4..f356bcd53ad 100644
--- a/chromium/v8/src/objects/property-details.h
+++ b/chromium/v8/src/objects/property-details.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_DETAILS_H_
#define V8_OBJECTS_PROPERTY_DETAILS_H_
-#include "include/v8.h"
+#include "include/v8-object.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -83,7 +83,7 @@ enum PropertyKind { kData = 0, kAccessor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::LocationField.
-enum PropertyLocation { kField = 0, kDescriptor = 1 };
+enum class PropertyLocation { kField = 0, kDescriptor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::ConstnessField.
@@ -242,6 +242,9 @@ enum class PropertyCellType {
kUndefined, // The PREMONOMORPHIC of property cells.
kConstant, // Cell has been assigned only once.
kConstantType, // Cell has been assigned only one type.
+ // Temporary value indicating an ongoing property cell state transition. Only
+ // observable by a background thread.
+ kInTransition,
// Value for dictionaries not holding cells, must be 0:
kNoCell = kMutable,
};
@@ -253,7 +256,8 @@ class PropertyDetails {
// Property details for global dictionary properties.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyCellType cell_type, int dictionary_index = 0) {
- value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ value_ = KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
AttributesField::encode(attributes) |
// We track PropertyCell constness via PropertyCellTypeField,
// so we set ConstnessField to kMutable to simplify DCHECKs related
@@ -266,7 +270,8 @@ class PropertyDetails {
// Property details for dictionary mode properties/elements.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyConstness constness, int dictionary_index = 0) {
- value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ value_ = KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
AttributesField::encode(attributes) |
ConstnessField::encode(constness) |
DictionaryStorageField::encode(dictionary_index) |
@@ -381,8 +386,7 @@ class PropertyDetails {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
using KindField = base::BitField<PropertyKind, 0, 1>;
- using LocationField = KindField::Next<PropertyLocation, 1>;
- using ConstnessField = LocationField::Next<PropertyConstness, 1>;
+ using ConstnessField = KindField::Next<PropertyConstness, 1>;
using AttributesField = ConstnessField::Next<PropertyAttributes, 3>;
static const int kAttributesReadOnlyMask =
(READ_ONLY << AttributesField::kShift);
@@ -392,11 +396,12 @@ class PropertyDetails {
(DONT_ENUM << AttributesField::kShift);
// Bit fields for normalized/dictionary mode objects.
- using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
+ using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 3>;
using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;
// Bit fields for fast objects.
- using RepresentationField = AttributesField::Next<uint32_t, 3>;
+ using LocationField = AttributesField::Next<PropertyLocation, 1>;
+ using RepresentationField = LocationField::Next<uint32_t, 3>;
using DescriptorPointer =
RepresentationField::Next<uint32_t, kDescriptorIndexBitCount>;
using FieldIndexField =
@@ -415,7 +420,6 @@ class PropertyDetails {
STATIC_ASSERT(KindField::kLastUsedBit < 8);
STATIC_ASSERT(ConstnessField::kLastUsedBit < 8);
STATIC_ASSERT(AttributesField::kLastUsedBit < 8);
- STATIC_ASSERT(LocationField::kLastUsedBit < 8);
static const int kInitialIndex = 1;
@@ -445,12 +449,12 @@ class PropertyDetails {
// with an enumeration index of 0 as a single byte.
uint8_t ToByte() {
// We only care about the value of KindField, ConstnessField, and
- // AttributesField. LocationField is also stored, but it will always be
- // kField. We've statically asserted earlier that all those fields fit into
- // a byte together.
+ // AttributesField. We've statically asserted earlier that these fields fit
+ // into a byte together.
+
+ DCHECK_EQ(PropertyLocation::kField, location());
+ STATIC_ASSERT(static_cast<int>(PropertyLocation::kField) == 0);
- // PropertyCellTypeField comes next, its value must be kNoCell == 0 for
- // dictionary mode PropertyDetails anyway.
DCHECK_EQ(PropertyCellType::kNoCell, cell_type());
STATIC_ASSERT(static_cast<int>(PropertyCellType::kNoCell) == 0);
@@ -464,16 +468,13 @@ class PropertyDetails {
// Only to be used for bytes obtained by ToByte. In particular, only used for
// non-global dictionary properties.
static PropertyDetails FromByte(uint8_t encoded_details) {
- // The 0-extension to 32bit sets PropertyCellType to kNoCell and
- // enumeration index to 0, as intended. Everything else is obtained from
- // |encoded_details|.
-
+ // The 0-extension to 32bit sets PropertyLocation to kField,
+ // PropertyCellType to kNoCell, and enumeration index to 0, as intended.
+ // Everything else is obtained from |encoded_details|.
PropertyDetails details(encoded_details);
-
- DCHECK_EQ(0, details.dictionary_index());
DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(PropertyCellType::kNoCell, details.cell_type());
-
+ DCHECK_EQ(0, details.dictionary_index());
return details;
}
@@ -500,7 +501,7 @@ class PropertyDetails {
// kField location is more general than kDescriptor, kDescriptor generalizes
// only to itself.
inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
- return b == kField || a == kDescriptor;
+ return b == PropertyLocation::kField || a == PropertyLocation::kDescriptor;
}
// PropertyConstness::kMutable constness is more general than
diff --git a/chromium/v8/src/objects/property.cc b/chromium/v8/src/objects/property.cc
index 014b41ff38c..4cc29c70ae0 100644
--- a/chromium/v8/src/objects/property.cc
+++ b/chromium/v8/src/objects/property.cc
@@ -89,8 +89,8 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Representation representation,
const MaybeObjectHandle& wrapped_field_type) {
DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeak());
- PropertyDetails details(kData, attributes, kField, constness, representation,
- field_index);
+ PropertyDetails details(kData, attributes, PropertyLocation::kField,
+ constness, representation, field_index);
return Descriptor(key, wrapped_field_type, details);
}
@@ -98,7 +98,7 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
PtrComprCageBase cage_base = GetPtrComprCageBase(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
- kDescriptor, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(cage_base), 0);
}
@@ -114,7 +114,7 @@ Descriptor Descriptor::AccessorConstant(Handle<Name> key,
Handle<Object> foreign,
PropertyAttributes attributes) {
return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
- kDescriptor, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, PropertyConstness::kConst,
Representation::Tagged(), 0);
}
@@ -134,7 +134,7 @@ void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
if (constness() == PropertyConstness::kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
- if (location() == kField) {
+ if (location() == PropertyLocation::kField) {
os << " field";
if (mode & kPrintFieldIndex) {
os << " " << field_index();
diff --git a/chromium/v8/src/objects/regexp-match-info.h b/chromium/v8/src/objects/regexp-match-info.h
index 0d6f76fccf7..3ce08262b1b 100644
--- a/chromium/v8/src/objects/regexp-match-info.h
+++ b/chromium/v8/src/objects/regexp-match-info.h
@@ -8,7 +8,6 @@
#include "src/base/compiler-specific.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/chromium/v8/src/objects/script.h b/chromium/v8/src/objects/script.h
index 10fe0f834e6..76b8d92dd83 100644
--- a/chromium/v8/src/objects/script.h
+++ b/chromium/v8/src/objects/script.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "include/v8-script.h"
#include "src/base/export-template.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
@@ -22,6 +23,10 @@ namespace internal {
class FunctionLiteral;
+namespace wasm {
+class NativeModule;
+} // namespace wasm
+
#include "torque-generated/src/objects/script-tq.inc"
// Script describes a script which has been added to the VM.
diff --git a/chromium/v8/src/objects/shared-function-info-inl.h b/chromium/v8/src/objects/shared-function-info-inl.h
index 583ca8dccf7..5ab324dc957 100644
--- a/chromium/v8/src/objects/shared-function-info-inl.h
+++ b/chromium/v8/src/objects/shared-function-info-inl.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
@@ -92,8 +93,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
-TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
-
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
ACCESSORS(InterpreterData, raw_interpreter_trampoline, CodeT,
@@ -130,13 +129,37 @@ DEF_ACQUIRE_GETTER(SharedFunctionInfo,
return value;
}
-RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo,
- internal_formal_parameter_count,
- formal_parameter_count)
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_with_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count + (kJSArgcIncludesReceiver ? 0 : 1);
+}
+
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_without_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count - kJSArgcReceiverSlots;
+}
+
+void SharedFunctionInfo::set_internal_formal_parameter_count(int value) {
+ DCHECK_EQ(value, static_cast<uint16_t>(value));
+ DCHECK_GE(value, kJSArgcReceiverSlots);
+ TorqueGeneratedClass::set_formal_parameter_count(value);
+}
+
RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
function_token_offset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+int32_t SharedFunctionInfo::relaxed_flags() const {
+ return flags(kRelaxedLoad);
+}
+void SharedFunctionInfo::set_relaxed_flags(int32_t flags) {
+ return set_flags(flags, kRelaxedStore);
+}
+
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
@@ -221,8 +244,6 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
return kNeedsBinaryCoverage;
}
- if (optimization_disabled()) return kHasOptimizationDisabled;
-
// Built-in functions are handled by the JSCallReducer.
if (HasBuiltinId()) return kIsBuiltin;
@@ -243,6 +264,8 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
if (HasBreakInfo()) return kMayContainBreakPoints;
+ if (optimization_disabled()) return kHasOptimizationDisabled;
+
return kIsInlineable;
}
@@ -253,34 +276,36 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, has_duplicate_parameters,
SharedFunctionInfo::HasDuplicateParametersBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, native,
SharedFunctionInfo::IsNativeBit)
#if V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
#endif // V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
requires_instance_members_initializer,
SharedFunctionInfo::RequiresInstanceMembersInitializerBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ name_should_print_as_anonymous,
SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ has_reported_binary_coverage,
SharedFunctionInfo::HasReportedBinaryCoverageBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_toplevel,
SharedFunctionInfo::IsTopLevelBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, properties_are_final,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, properties_are_final,
SharedFunctionInfo::PropertiesAreFinalBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
private_name_lookup_skips_outer_class,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
@@ -289,12 +314,12 @@ bool SharedFunctionInfo::optimization_disabled() const {
}
BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
- return DisabledOptimizationReasonBits::decode(flags());
+ return DisabledOptimizationReasonBits::decode(flags(kRelaxedLoad));
}
LanguageMode SharedFunctionInfo::language_mode() const {
STATIC_ASSERT(LanguageModeSize == 2);
- return construct_language_mode(IsStrictBit::decode(flags()));
+ return construct_language_mode(IsStrictBit::decode(flags(kRelaxedLoad)));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
@@ -302,22 +327,22 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = IsStrictBit::update(hints, is_strict(language_mode));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
FunctionKind SharedFunctionInfo::kind() const {
STATIC_ASSERT(FunctionKindBits::kSize == kFunctionKindBitSize);
- return FunctionKindBits::decode(flags());
+ return FunctionKindBits::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = FunctionKindBits::update(hints, kind);
hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
@@ -326,7 +351,7 @@ bool SharedFunctionInfo::is_wrapped() const {
}
bool SharedFunctionInfo::construct_as_builtin() const {
- return ConstructAsBuiltinBit::decode(flags());
+ return ConstructAsBuiltinBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::CalculateConstructAsBuiltin() {
@@ -340,15 +365,15 @@ void SharedFunctionInfo::CalculateConstructAsBuiltin() {
uses_builtins_construct_stub = true;
}
- int f = flags();
+ int f = flags(kRelaxedLoad);
f = ConstructAsBuiltinBit::update(f, uses_builtins_construct_stub);
- set_flags(f);
+ set_flags(f, kRelaxedStore);
}
int SharedFunctionInfo::function_map_index() const {
// Note: Must be kept in sync with the FastNewClosure builtin.
- int index =
- Context::FIRST_FUNCTION_MAP_INDEX + FunctionMapIndexBits::decode(flags());
+ int index = Context::FIRST_FUNCTION_MAP_INDEX +
+ FunctionMapIndexBits::decode(flags(kRelaxedLoad));
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
return index;
}
@@ -359,7 +384,8 @@ void SharedFunctionInfo::set_function_map_index(int index) {
DCHECK_LE(Context::FIRST_FUNCTION_MAP_INDEX, index);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
index -= Context::FIRST_FUNCTION_MAP_INDEX;
- set_flags(FunctionMapIndexBits::update(flags(), index));
+ set_flags(FunctionMapIndexBits::update(flags(kRelaxedLoad), index),
+ kRelaxedStore);
}
void SharedFunctionInfo::clear_padding() {
@@ -378,7 +404,12 @@ void SharedFunctionInfo::DontAdaptArguments() {
// TODO(leszeks): Revise this DCHECK now that the code field is gone.
DCHECK(!HasWasmExportedFunctionData());
#endif // V8_ENABLE_WEBASSEMBLY
- set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
+ TorqueGeneratedClass::set_formal_parameter_count(kDontAdaptArgumentsSentinel);
+}
+
+bool SharedFunctionInfo::IsDontAdaptArguments() const {
+ return TorqueGeneratedClass::formal_parameter_count() ==
+ kDontAdaptArgumentsSentinel;
}
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
@@ -484,8 +515,8 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope(IsolateT* isolate) const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = handle(shared.baseline_data(), isolate);
+ if (shared.HasBaselineCode()) {
+ retain_code_ = handle(shared.baseline_code(kAcquireLoad), isolate);
} else if (shared.HasBytecodeArray()) {
retain_code_ = handle(shared.GetBytecodeArray(isolate), isolate);
} else {
@@ -498,8 +529,9 @@ IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
LocalIsolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = isolate->heap()->NewPersistentHandle(shared.baseline_data());
+ if (shared.HasBaselineCode()) {
+ retain_code_ = isolate->heap()->NewPersistentHandle(
+ shared.baseline_code(kAcquireLoad));
} else if (shared.HasBytecodeArray()) {
retain_code_ =
isolate->heap()->NewPersistentHandle(shared.GetBytecodeArray(isolate));
@@ -530,8 +562,7 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
bool SharedFunctionInfo::HasBytecodeArray() const {
Object data = function_data(kAcquireLoad);
- return data.IsBytecodeArray() || data.IsInterpreterData() ||
- data.IsBaselineData();
+ return data.IsBytecodeArray() || data.IsInterpreterData() || data.IsCodeT();
}
template <typename IsolateT>
@@ -547,40 +578,14 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
return GetActiveBytecodeArray();
}
-DEF_GETTER(BaselineData, baseline_code, Code) {
- return FromCodeT(TorqueGeneratedClass::baseline_code(cage_base));
-}
-
-void BaselineData::set_baseline_code(Code code, WriteBarrierMode mode) {
- return TorqueGeneratedClass::set_baseline_code(ToCodeT(code), mode);
-}
-
-BytecodeArray BaselineData::GetActiveBytecodeArray() const {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- return BytecodeArray::cast(data);
- } else {
- DCHECK(data.IsInterpreterData());
- return InterpreterData::cast(data).bytecode_array();
- }
-}
-
-void BaselineData::SetActiveBytecodeArray(BytecodeArray bytecode) {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- set_data(bytecode);
- } else {
- DCHECK(data.IsInterpreterData());
- InterpreterData::cast(data).set_bytecode_array(bytecode);
- }
-}
-
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
if (data.IsBytecodeArray()) {
return BytecodeArray::cast(data);
- } else if (data.IsBaselineData()) {
- return baseline_data().GetActiveBytecodeArray();
} else {
DCHECK(data.IsInterpreterData());
return InterpreterData::cast(data).bytecode_array();
@@ -588,11 +593,13 @@ BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
}
void SharedFunctionInfo::SetActiveBytecodeArray(BytecodeArray bytecode) {
+ // We don't allow setting the active bytecode array on baseline-optimized
+ // functions. They should have been flushed earlier.
+ DCHECK(!HasBaselineCode());
+
Object data = function_data(kAcquireLoad);
if (data.IsBytecodeArray()) {
set_function_data(bytecode, kReleaseStore);
- } else if (data.IsBaselineData()) {
- baseline_data().SetActiveBytecodeArray(bytecode);
} else {
DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
@@ -618,12 +625,13 @@ bool SharedFunctionInfo::ShouldFlushCode(
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) {
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
- data =
- ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+ data = baseline_code.bytecode_or_interpreter_data();
} else if (!IsByteCodeFlushingEnabled(code_flush_mode)) {
// If bytecode flushing isn't enabled and there is no baseline code there is
// nothing to flush.
@@ -645,40 +653,56 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
bool SharedFunctionInfo::HasInterpreterData() const {
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return data.IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return InterpreterData::cast(data);
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
- DCHECK(!HasBaselineData());
+ DCHECK(!HasBaselineCode());
set_function_data(interpreter_data, kReleaseStore);
}
-bool SharedFunctionInfo::HasBaselineData() const {
- return function_data(kAcquireLoad).IsBaselineData();
+bool SharedFunctionInfo::HasBaselineCode() const {
+ Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ DCHECK_EQ(FromCodeT(CodeT::cast(data)).kind(), CodeKind::BASELINE);
+ return true;
+ }
+ return false;
}
-BaselineData SharedFunctionInfo::baseline_data() const {
- DCHECK(HasBaselineData());
- return BaselineData::cast(function_data(kAcquireLoad));
+Code SharedFunctionInfo::baseline_code(AcquireLoadTag) const {
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(function_data(kAcquireLoad)));
}
-void SharedFunctionInfo::set_baseline_data(BaselineData baseline_data) {
- set_function_data(baseline_data, kReleaseStore);
+void SharedFunctionInfo::set_baseline_code(Code baseline_code,
+ ReleaseStoreTag) {
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ set_function_data(ToCodeT(baseline_code), kReleaseStore);
}
-void SharedFunctionInfo::flush_baseline_data() {
- DCHECK(HasBaselineData());
- set_function_data(baseline_data().data(), kReleaseStore);
+void SharedFunctionInfo::FlushBaselineCode() {
+ DCHECK(HasBaselineCode());
+ set_function_data(baseline_code(kAcquireLoad).bytecode_or_interpreter_data(),
+ kReleaseStore);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -898,11 +922,11 @@ bool SharedFunctionInfo::CanDiscardCompiled() const {
if (HasAsmWasmData()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return HasBytecodeArray() || HasUncompiledDataWithPreparseData() ||
- HasBaselineData();
+ HasBaselineCode();
}
bool SharedFunctionInfo::is_class_constructor() const {
- return IsClassConstructorBit::decode(flags());
+ return IsClassConstructorBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_are_properties_final(bool value) {
diff --git a/chromium/v8/src/objects/shared-function-info.cc b/chromium/v8/src/objects/shared-function-info.cc
index 22e98a140c4..4354a2af28c 100644
--- a/chromium/v8/src/objects/shared-function-info.cc
+++ b/chromium/v8/src/objects/shared-function-info.cc
@@ -8,6 +8,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/strings/string-builder-inl.h"
@@ -52,13 +53,13 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// Set integer fields (smi or int, depending on the architecture).
set_length(0);
- set_internal_formal_parameter_count(0);
+ set_internal_formal_parameter_count(JSParameterCount(0));
set_expected_nof_properties(0);
set_raw_function_token_offset(0);
// All flags default to false or 0, except ConstructAsBuiltinBit just because
// we're using the kIllegal builtin.
- set_flags(ConstructAsBuiltinBit::encode(true));
+ set_flags(ConstructAsBuiltinBit::encode(true), kRelaxedStore);
set_flags2(0);
UpdateFunctionMapIndex();
@@ -84,10 +85,10 @@ Code SharedFunctionInfo::GetCode() const {
DCHECK(HasBytecodeArray());
return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
}
- if (data.IsBaselineData()) {
- // Having BaselineData means we are a compiled, baseline function.
- DCHECK(HasBaselineData());
- return baseline_data().baseline_code();
+ if (data.IsCodeT()) {
+ // Having baseline Code means we are a compiled, baseline function.
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(data));
}
#if V8_ENABLE_WEBASSEMBLY
if (data.IsAsmWasmData()) {
@@ -435,7 +436,8 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
+ set_flags(DisabledOptimizationReasonBits::update(flags(kRelaxedLoad), reason),
+ kRelaxedStore);
// Code should be the lazy compilation stub or else interpreted.
Isolate* isolate = GetIsolate();
DCHECK(abstract_code(isolate).kind() == CodeKind::INTERPRETED_FUNCTION ||
@@ -459,7 +461,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
- shared_info->set_internal_formal_parameter_count(lit->parameter_count());
+ shared_info->set_internal_formal_parameter_count(
+ JSParameterCount(lit->parameter_count()));
shared_info->SetFunctionTokenPosition(lit->function_token_position(),
lit->start_position());
shared_info->set_syntax_kind(lit->syntax_kind());
@@ -704,6 +707,7 @@ void SharedFunctionInfo::UninstallDebugBytecode(SharedFunctionInfo shared,
isolate->shared_function_info_access());
DebugInfo debug_info = shared.GetDebugInfo();
BytecodeArray original_bytecode_array = debug_info.OriginalBytecodeArray();
+ DCHECK(!shared.HasBaselineCode());
shared.SetActiveBytecodeArray(original_bytecode_array);
debug_info.set_original_bytecode_array(
ReadOnlyRoots(isolate).undefined_value(), kReleaseStore);
diff --git a/chromium/v8/src/objects/shared-function-info.h b/chromium/v8/src/objects/shared-function-info.h
index fd19f90165f..52678a87244 100644
--- a/chromium/v8/src/objects/shared-function-info.h
+++ b/chromium/v8/src/objects/shared-function-info.h
@@ -10,6 +10,7 @@
#include "src/base/bit-field.h"
#include "src/builtins/builtins.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/function-kind.h"
#include "src/objects/function-syntax-kind.h"
@@ -21,7 +22,6 @@
#include "src/roots/roots.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
#include "torque-generated/bit-fields.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -146,24 +146,12 @@ class InterpreterData
public:
DECL_ACCESSORS(interpreter_trampoline, Code)
- DECL_PRINTER(InterpreterData)
-
private:
DECL_ACCESSORS(raw_interpreter_trampoline, CodeT)
TQ_OBJECT_CONSTRUCTORS(InterpreterData)
};
-class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
- public:
- inline BytecodeArray GetActiveBytecodeArray() const;
- inline void SetActiveBytecodeArray(BytecodeArray bytecode);
-
- DECL_ACCESSORS(baseline_code, Code)
-
- TQ_OBJECT_CONSTRUCTORS(BaselineData)
-};
-
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo
@@ -275,8 +263,12 @@ class SharedFunctionInfo
// [internal formal parameter count]: The declared number of parameters.
// For subclass constructors, also includes new.target.
- // The size of function's frame is internal_formal_parameter_count + 1.
- DECL_UINT16_ACCESSORS(internal_formal_parameter_count)
+  // The size of the function's frame is
+  // internal_formal_parameter_count_with_receiver.
+ inline void set_internal_formal_parameter_count(int value);
+ inline uint16_t internal_formal_parameter_count_with_receiver() const;
+ inline uint16_t internal_formal_parameter_count_without_receiver() const;
+
private:
using TorqueGeneratedSharedFunctionInfo::formal_parameter_count;
using TorqueGeneratedSharedFunctionInfo::set_formal_parameter_count;
@@ -285,6 +277,7 @@ class SharedFunctionInfo
// Set the formal parameter count so the function code will be
// called without using argument adaptor frames.
inline void DontAdaptArguments();
+ inline bool IsDontAdaptArguments() const;
// [function data]: This field holds some additional data for function.
// Currently it has one of:
@@ -314,10 +307,10 @@ class SharedFunctionInfo
inline bool HasInterpreterData() const;
inline InterpreterData interpreter_data() const;
inline void set_interpreter_data(InterpreterData interpreter_data);
- inline bool HasBaselineData() const;
- inline BaselineData baseline_data() const;
- inline void set_baseline_data(BaselineData Baseline_data);
- inline void flush_baseline_data();
+ inline bool HasBaselineCode() const;
+ inline Code baseline_code(AcquireLoadTag) const;
+ inline void set_baseline_code(Code baseline_code, ReleaseStoreTag);
+ inline void FlushBaselineCode();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
@@ -414,7 +407,7 @@ class SharedFunctionInfo
inline bool HasSharedName() const;
// [flags] Bit field containing various flags about the function.
- DECL_INT32_ACCESSORS(flags)
+ DECL_RELAXED_INT32_ACCESSORS(flags)
DECL_UINT8_ACCESSORS(flags2)
// True if the outer class scope contains a private brand for
@@ -537,17 +530,19 @@ class SharedFunctionInfo
inline bool ShouldFlushCode(base::EnumSet<CodeFlushMode> code_flush_mode);
enum Inlineability {
- kIsInlineable,
// Different reasons for not being inlineable:
kHasNoScript,
kNeedsBinaryCoverage,
- kHasOptimizationDisabled,
kIsBuiltin,
kIsNotUserCode,
kHasNoBytecode,
kExceedsBytecodeLimit,
kMayContainBreakPoints,
+ kHasOptimizationDisabled,
+ // Actually inlineable!
+ kIsInlineable,
};
+ // Returns the first value that applies (see enum definition for the order).
template <typename IsolateT>
Inlineability GetInlineability(IsolateT* isolate, bool is_turboprop) const;
@@ -673,6 +668,10 @@ class SharedFunctionInfo
inline uint16_t get_property_estimate_from_literal(FunctionLiteral* literal);
+ // For ease of use of the BITFIELD macro.
+ inline int32_t relaxed_flags() const;
+ inline void set_relaxed_flags(int32_t flags);
+
template <typename Impl>
friend class FactoryBase;
friend class V8HeapExplorer;
diff --git a/chromium/v8/src/objects/shared-function-info.tq b/chromium/v8/src/objects/shared-function-info.tq
index 0b0930b6b42..4f80f568dcb 100644
--- a/chromium/v8/src/objects/shared-function-info.tq
+++ b/chromium/v8/src/objects/shared-function-info.tq
@@ -14,13 +14,6 @@ extern class InterpreterData extends Struct {
@ifnot(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: Code;
}
-@generatePrint
-extern class BaselineData extends Struct {
- @if(V8_EXTERNAL_CODE_SPACE) baseline_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) baseline_code: Code;
- data: BytecodeArray|InterpreterData;
-}
-
type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';
@@ -63,11 +56,17 @@ class SharedFunctionInfo extends HeapObject {
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
script_or_debug_info: Script|DebugInfo|Undefined;
- // [length]: The function length - usually the number of declared parameters.
+ // [length]: The function length - usually the number of declared parameters
+ // (always without the receiver).
// Use up to 2^16-2 parameters (16 bits of values, where one is reserved for
// kDontAdaptArgumentsSentinel). The value is only reliable when the function
// has been compiled.
length: int16;
+ // [formal_parameter_count]: The number of declared parameters (or the special
+ // value kDontAdaptArgumentsSentinel to indicate that arguments are passed
+ // unaltered).
+ // In contrast to [length], formal_parameter_count includes the receiver if
+ // kJSArgcIncludesReceiver is true.
formal_parameter_count: uint16;
function_token_offset: uint16;
// [expected_nof_properties]: Expected number of properties for the
@@ -84,6 +83,40 @@ class SharedFunctionInfo extends HeapObject {
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
+const kDontAdaptArgumentsSentinel: constexpr int32
+ generates 'kDontAdaptArgumentsSentinel';
+const kJSArgcIncludesReceiver:
+ constexpr bool generates 'kJSArgcIncludesReceiver';
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount - 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (!kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount + 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro IsSharedFunctionInfoDontAdaptArguments(sfi: SharedFunctionInfo): bool {
+ const formalParameterCount = sfi.formal_parameter_count;
+ return Convert<int32>(formalParameterCount) == kDontAdaptArgumentsSentinel;
+}
+
@abstract
@export
@customCppClass
diff --git a/chromium/v8/src/objects/source-text-module.h b/chromium/v8/src/objects/source-text-module.h
index 6f2a3cd0f72..bb5bd5d796e 100644
--- a/chromium/v8/src/objects/source-text-module.h
+++ b/chromium/v8/src/objects/source-text-module.h
@@ -283,7 +283,6 @@ class SourceTextModuleInfoEntry
: public TorqueGeneratedSourceTextModuleInfoEntry<SourceTextModuleInfoEntry,
Struct> {
public:
- DECL_PRINTER(SourceTextModuleInfoEntry)
DECL_VERIFIER(SourceTextModuleInfoEntry)
template <typename IsolateT>
diff --git a/chromium/v8/src/objects/source-text-module.tq b/chromium/v8/src/objects/source-text-module.tq
index d378d5a8626..c663c6906a6 100644
--- a/chromium/v8/src/objects/source-text-module.tq
+++ b/chromium/v8/src/objects/source-text-module.tq
@@ -47,7 +47,6 @@ extern class SourceTextModule extends Module {
flags: SmiTagged<SourceTextModuleFlags>;
}
-@generatePrint
extern class ModuleRequest extends Struct {
specifier: String;
diff --git a/chromium/v8/src/objects/stack-frame-info.cc b/chromium/v8/src/objects/stack-frame-info.cc
index 7ccdd6d9552..71357816d78 100644
--- a/chromium/v8/src/objects/stack-frame-info.cc
+++ b/chromium/v8/src/objects/stack-frame-info.cc
@@ -292,7 +292,7 @@ PrimitiveHeapObject InferMethodNameFromFastObject(Isolate* isolate,
auto details = descriptors.GetDetails(i);
if (details.IsDontEnum()) continue;
Object value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
auto field_index = FieldIndex::ForPropertyIndex(
map, details.field_index(), details.representation());
if (field_index.is_double()) continue;
diff --git a/chromium/v8/src/objects/stack-frame-info.h b/chromium/v8/src/objects/stack-frame-info.h
index afd45819b9a..ce23de26d41 100644
--- a/chromium/v8/src/objects/stack-frame-info.h
+++ b/chromium/v8/src/objects/stack-frame-info.h
@@ -44,7 +44,6 @@ class StackFrameInfo
DECL_ACCESSORS(code_object, HeapObject)
// Dispatched behavior.
- DECL_PRINTER(StackFrameInfo)
DECL_VERIFIER(StackFrameInfo)
// Used to signal that the requested field is unknown.
diff --git a/chromium/v8/src/objects/string-inl.h b/chromium/v8/src/objects/string-inl.h
index ba2d463047b..33f34c6bcdf 100644
--- a/chromium/v8/src/objects/string-inl.h
+++ b/chromium/v8/src/objects/string-inl.h
@@ -119,6 +119,12 @@ StringShape::StringShape(const String str)
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
+StringShape::StringShape(const String str, PtrComprCageBase cage_base)
+ : type_(str.map(cage_base, kAcquireLoad).instance_type()) {
+ set_valid();
+ DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
+}
+
StringShape::StringShape(Map map) : type_(map.instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
@@ -506,12 +512,14 @@ bool String::IsEqualToImpl(
data, len);
case kExternalStringTag | kOneByteStringTag:
return CompareCharsEqual(
- ExternalOneByteString::cast(string).GetChars() + slice_offset, data,
- len);
+ ExternalOneByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
+ data, len);
case kExternalStringTag | kTwoByteStringTag:
return CompareCharsEqual(
- ExternalTwoByteString::cast(string).GetChars() + slice_offset, data,
- len);
+ ExternalTwoByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
+ data, len);
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
@@ -576,19 +584,20 @@ bool String::IsOneByteEqualTo(base::Vector<const char> str) {
}
template <typename Char>
-const Char* String::GetChars(const DisallowGarbageCollection& no_gc) const {
+const Char* String::GetChars(PtrComprCageBase cage_base,
+ const DisallowGarbageCollection& no_gc) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
- return StringShape(*this).IsExternal()
- ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ return StringShape(*this, cage_base).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc);
}
template <typename Char>
const Char* String::GetChars(
- const DisallowGarbageCollection& no_gc,
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const {
- return StringShape(*this).IsExternal()
- ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ return StringShape(*this, cage_base).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc,
access_guard);
}
@@ -617,45 +626,53 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
return string;
}
+uint16_t String::Get(int index) const {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return GetImpl(index, GetPtrComprCageBase(*this),
+ SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
uint16_t String::Get(int index, Isolate* isolate) const {
SharedStringAccessGuardIfNeeded scope(isolate);
- return GetImpl(index, scope);
+ return GetImpl(index, isolate, scope);
}
uint16_t String::Get(int index, LocalIsolate* local_isolate) const {
SharedStringAccessGuardIfNeeded scope(local_isolate);
- return GetImpl(index, scope);
+ return GetImpl(index, local_isolate, scope);
}
uint16_t String::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return GetImpl(index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return GetImpl(index, cage_base, access_guard);
}
uint16_t String::GetImpl(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
DCHECK(index >= 0 && index < length());
class StringGetDispatcher : public AllStatic {
public:
#define DEFINE_METHOD(Type) \
static inline uint16_t Handle##Type( \
- Type str, int index, \
+ Type str, int index, PtrComprCageBase cage_base, \
const SharedStringAccessGuardIfNeeded& access_guard) { \
- return str.Get(index, access_guard); \
+ return str.Get(index, cage_base, access_guard); \
}
STRING_CLASS_TYPES(DEFINE_METHOD)
#undef DEFINE_METHOD
static inline uint16_t HandleInvalidString(
- String str, int index,
+ String str, int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
UNREACHABLE();
}
};
return StringShape(*this)
- .DispatchToSpecificType<StringGetDispatcher, uint16_t>(*this, index,
- access_guard);
+ .DispatchToSpecificType<StringGetDispatcher, uint16_t>(
+ *this, index, cage_base, access_guard);
}
void String::Set(int index, uint16_t value) {
@@ -667,9 +684,11 @@ void String::Set(int index, uint16_t value) {
: SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
-bool String::IsFlat() const {
- if (!StringShape(*this).IsCons()) return true;
- return ConsString::cast(*this).second().length() == 0;
+bool String::IsFlat() const { return IsFlat(GetPtrComprCageBase(*this)); }
+
+bool String::IsFlat(PtrComprCageBase cage_base) const {
+ if (!StringShape(*this, cage_base).IsCons()) return true;
+ return ConsString::cast(*this).second(cage_base).length() == 0;
}
String String::GetUnderlying() const {
@@ -701,9 +720,10 @@ ConsString String::VisitFlat(
int slice_offset = offset;
const int length = string.length();
DCHECK(offset <= length);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(string);
while (true) {
- int32_t type = string.map().instance_type();
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ int32_t tag = StringShape(string, cage_base).full_representation_tag();
+ switch (tag) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
@@ -720,13 +740,15 @@ ConsString String::VisitFlat(
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalOneByteString::cast(string).GetChars() + slice_offset,
+ ExternalOneByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- ExternalTwoByteString::cast(string).GetChars() + slice_offset,
+ ExternalTwoByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
length - offset);
return ConsString();
@@ -734,7 +756,7 @@ ConsString String::VisitFlat(
case kSlicedStringTag | kTwoByteStringTag: {
SlicedString slicedString = SlicedString::cast(string);
slice_offset += slicedString.offset();
- string = slicedString.parent();
+ string = slicedString.parent(cage_base);
continue;
}
@@ -744,7 +766,7 @@ ConsString String::VisitFlat(
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- string = ThinString::cast(string).actual();
+ string = ThinString::cast(string).actual(cage_base);
continue;
default:
@@ -771,11 +793,13 @@ inline base::Vector<const base::uc16> String::GetCharVector(
uint8_t SeqOneByteString::Get(int index) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
- return Get(index, SharedStringAccessGuardIfNeeded::NotNeeded());
+ return Get(index, GetPtrComprCageBase(*this),
+ SharedStringAccessGuardIfNeeded::NotNeeded());
}
uint8_t SeqOneByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
return ReadField<byte>(kHeaderSize + index * kCharSize);
@@ -825,7 +849,8 @@ base::uc16* SeqTwoByteString::GetChars(
}
uint16_t SeqTwoByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
return ReadField<uint16_t>(kHeaderSize + index * kShortSize);
@@ -929,11 +954,13 @@ DEF_GETTER(ExternalOneByteString, mutable_resource,
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
- if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ if (resource(isolate)->IsCacheable())
+ mutable_resource(isolate)->UpdateDataCache();
} else {
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ WriteExternalPointerField(
+ kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource(isolate)->data()),
+ kExternalStringResourceDataTag);
}
}
@@ -954,13 +981,15 @@ void ExternalOneByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint8_t* ExternalOneByteString::GetChars() const {
+const uint8_t* ExternalOneByteString::GetChars(
+ PtrComprCageBase cage_base) const {
DisallowGarbageCollection no_gc;
+ auto res = resource(cage_base);
if (is_uncached()) {
- if (resource()->IsCacheable()) {
+ if (res->IsCacheable()) {
// TODO(solanes): Teach TurboFan/CSA to not bailout to the runtime to
// avoid this call.
- return reinterpret_cast<const uint8_t*>(resource()->cached_data());
+ return reinterpret_cast<const uint8_t*>(res->cached_data());
}
#if DEBUG
// Check that this method is called only from the main thread if we have an
@@ -973,14 +1002,15 @@ const uint8_t* ExternalOneByteString::GetChars() const {
#endif
}
- return reinterpret_cast<const uint8_t*>(resource()->data());
+ return reinterpret_cast<const uint8_t*>(res->data());
}
uint8_t ExternalOneByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
- return GetChars()[index];
+ return GetChars(cage_base)[index];
}
DEF_GETTER(ExternalTwoByteString, resource,
@@ -996,11 +1026,13 @@ DEF_GETTER(ExternalTwoByteString, mutable_resource,
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
- if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ if (resource(isolate)->IsCacheable())
+ mutable_resource(isolate)->UpdateDataCache();
} else {
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ WriteExternalPointerField(
+ kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource(isolate)->data()),
+ kExternalStringResourceDataTag);
}
}
@@ -1021,13 +1053,15 @@ void ExternalTwoByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint16_t* ExternalTwoByteString::GetChars() const {
+const uint16_t* ExternalTwoByteString::GetChars(
+ PtrComprCageBase cage_base) const {
DisallowGarbageCollection no_gc;
+ auto res = resource(cage_base);
if (is_uncached()) {
- if (resource()->IsCacheable()) {
+ if (res->IsCacheable()) {
// TODO(solanes): Teach TurboFan/CSA to not bailout to the runtime to
// avoid this call.
- return resource()->cached_data();
+ return res->cached_data();
}
#if DEBUG
// Check that this method is called only from the main thread if we have an
@@ -1040,19 +1074,20 @@ const uint16_t* ExternalTwoByteString::GetChars() const {
#endif
}
- return resource()->data();
+ return res->data();
}
uint16_t ExternalTwoByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
- return GetChars()[index];
+ return GetChars(cage_base)[index];
}
const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
unsigned start) {
- return GetChars() + start;
+ return GetChars(GetPtrComprCageBase(*this)) + start;
}
int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
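The string-inl.h hunks above all follow one pattern: each accessor gains a PtrComprCageBase parameter so that, under pointer compression, the cage base is derived once by the outermost caller (GetPtrComprCageBase) and then threaded through Get/GetChars/GetImpl instead of being recomputed from the object address on every dispatch. A minimal sketch of that parameter-threading shape, using hypothetical stand-in types rather than V8's real classes:

    #include <cstdint>

    struct PtrComprCageBase {        // stand-in for V8's cage-base handle
      uintptr_t base = 0;
    };

    struct Str {                     // hypothetical string-like object
      const uint16_t* data;
      int length;

      // Hot path: the caller already holds the cage base, nothing re-derived.
      uint16_t Get(int index, PtrComprCageBase /*cage_base*/) const {
        return data[index];
      }

      // Convenience overload mirroring String::Get(int): derive the cage base
      // once, then delegate to the cage-base-taking implementation.
      uint16_t Get(int index) const { return Get(index, DeriveCageBase()); }

      PtrComprCageBase DeriveCageBase() const {
        // Stand-in for GetPtrComprCageBase(*this): the base is recovered from
        // the object's own address, which is exactly the recomputation the
        // extra parameter avoids inside inner loops.
        return PtrComprCageBase{reinterpret_cast<uintptr_t>(this) &
                                ~uintptr_t{0xFFFFFFFFu}};
      }
    };

The same threading appears below for the external-string GetChars accessors and for StringShape, which now also accepts the cached cage base.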
diff --git a/chromium/v8/src/objects/string-table.cc b/chromium/v8/src/objects/string-table.cc
index cff50bea798..d4809010951 100644
--- a/chromium/v8/src/objects/string-table.cc
+++ b/chromium/v8/src/objects/string-table.cc
@@ -574,13 +574,14 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
std::unique_ptr<Char[]> buffer;
const Char* chars;
- if (source.IsConsString()) {
- DCHECK(!source.IsFlat());
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ if (source.IsConsString(isolate)) {
+ DCHECK(!source.IsFlat(isolate));
buffer.reset(new Char[length]);
- String::WriteToFlat(source, buffer.get(), 0, length);
+ String::WriteToFlat(source, buffer.get(), 0, length, isolate, access_guard);
chars = buffer.get();
} else {
- chars = source.GetChars<Char>(no_gc) + start;
+ chars = source.GetChars<Char>(isolate, no_gc, access_guard) + start;
}
// TODO(verwaest): Internalize to one-byte when possible.
SequentialStringKey<Char> key(base::Vector<const Char>(chars, length), seed);
diff --git a/chromium/v8/src/objects/string.cc b/chromium/v8/src/objects/string.cc
index 4b18ee3d056..7e951b428d2 100644
--- a/chromium/v8/src/objects/string.cc
+++ b/chromium/v8/src/objects/string.cc
@@ -15,6 +15,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/numbers/conversions.h"
+#include "src/objects/instance-type.h"
#include "src/objects/map.h"
#include "src/objects/oddball.h"
#include "src/objects/string-comparator.h"
@@ -546,29 +547,30 @@ String::FlatContent String::GetFlatContent(
}
#endif
USE(no_gc);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
int length = this->length();
- StringShape shape(*this);
+ StringShape shape(*this, cage_base);
String string = *this;
int offset = 0;
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
- if (cons.second().length() != 0) {
+ if (cons.second(cage_base).length() != 0) {
return FlatContent(no_gc);
}
- string = cons.first();
- shape = StringShape(string);
+ string = cons.first(cage_base);
+ shape = StringShape(string, cage_base);
} else if (shape.representation_tag() == kSlicedStringTag) {
SlicedString slice = SlicedString::cast(string);
offset = slice.offset();
- string = slice.parent();
- shape = StringShape(string);
+ string = slice.parent(cage_base);
+ shape = StringShape(string, cage_base);
DCHECK(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
if (shape.representation_tag() == kThinStringTag) {
ThinString thin = ThinString::cast(string);
- string = thin.actual();
- shape = StringShape(string);
+ string = thin.actual(cage_base);
+ shape = StringShape(string, cage_base);
DCHECK(!shape.IsCons());
DCHECK(!shape.IsSliced());
}
@@ -577,7 +579,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kSeqStringTag) {
start = SeqOneByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalOneByteString::cast(string).GetChars();
+ start = ExternalOneByteString::cast(string).GetChars(cage_base);
}
return FlatContent(start + offset, length, no_gc);
} else {
@@ -586,7 +588,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kSeqStringTag) {
start = SeqTwoByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalTwoByteString::cast(string).GetChars();
+ start = ExternalTwoByteString::cast(string).GetChars(cage_base);
}
return FlatContent(start + offset, length, no_gc);
}
@@ -645,104 +647,113 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
// static
template <typename sinkchar>
-void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
+void String::WriteToFlat(String source, sinkchar* sink, int start, int length) {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(source));
- return WriteToFlat(source, sink, from, to,
+ return WriteToFlat(source, sink, start, length, GetPtrComprCageBase(source),
SharedStringAccessGuardIfNeeded::NotNeeded());
}
// static
template <typename sinkchar>
-void String::WriteToFlat(String source, sinkchar* sink, int from, int to,
+void String::WriteToFlat(String source, sinkchar* sink, int start, int length,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
- while (from < to) {
- DCHECK_LE(0, from);
- DCHECK_LE(to, source.length());
- switch (StringShape(source).full_representation_tag()) {
- case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink, ExternalOneByteString::cast(source).GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kExternalStringTag: {
- const base::uc16* data = ExternalTwoByteString::cast(source).GetChars();
- CopyChars(sink, data + from, to - from);
- return;
- }
- case kOneByteStringTag | kSeqStringTag: {
+ if (length == 0) return;
+ while (true) {
+ DCHECK_LT(0, length);
+ DCHECK_LE(0, start);
+ DCHECK_LE(length, source.length());
+ switch (StringShape(source, cage_base).full_representation_tag()) {
+ case kOneByteStringTag | kExternalStringTag:
CopyChars(
sink,
- SeqOneByteString::cast(source).GetChars(no_gc, access_guard) + from,
- to - from);
+ ExternalOneByteString::cast(source).GetChars(cage_base) + start,
+ length);
return;
- }
- case kTwoByteStringTag | kSeqStringTag: {
+ case kTwoByteStringTag | kExternalStringTag:
CopyChars(
sink,
- SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) + from,
- to - from);
+ ExternalTwoByteString::cast(source).GetChars(cage_base) + start,
+ length);
+ return;
+ case kOneByteStringTag | kSeqStringTag:
+ CopyChars(sink,
+ SeqOneByteString::cast(source).GetChars(no_gc, access_guard) +
+ start,
+ length);
+ return;
+ case kTwoByteStringTag | kSeqStringTag:
+ CopyChars(sink,
+ SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) +
+ start,
+ length);
return;
- }
case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString cons_string = ConsString::cast(source);
- String first = cons_string.first();
+ String first = cons_string.first(cage_base);
int boundary = first.length();
- if (to - boundary >= boundary - from) {
+ int first_length = boundary - start;
+ int second_length = start + length - boundary;
+ if (second_length >= first_length) {
// Right hand side is longer. Recurse over left.
- if (from < boundary) {
- WriteToFlat(first, sink, from, boundary, access_guard);
- if (from == 0 && cons_string.second() == first) {
+ if (first_length > 0) {
+ WriteToFlat(first, sink, start, first_length, cage_base,
+ access_guard);
+ if (start == 0 && cons_string.second(cage_base) == first) {
CopyChars(sink + boundary, sink, boundary);
return;
}
- sink += boundary - from;
- from = 0;
+ sink += boundary - start;
+ start = 0;
+ length -= first_length;
} else {
- from -= boundary;
+ start -= boundary;
}
- to -= boundary;
- source = cons_string.second();
+ source = cons_string.second(cage_base);
} else {
// Left hand side is longer. Recurse over right.
- if (to > boundary) {
- String second = cons_string.second();
+ if (second_length > 0) {
+ String second = cons_string.second(cage_base);
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left, a list, essentially. We inline the
// common case of sequential one-byte right child.
- if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
- } else if (second.IsSeqOneByteString()) {
+ if (second_length == 1) {
+ sink[boundary - start] =
+ static_cast<sinkchar>(second.Get(0, cage_base, access_guard));
+ } else if (second.IsSeqOneByteString(cage_base)) {
CopyChars(
- sink + boundary - from,
+ sink + boundary - start,
SeqOneByteString::cast(second).GetChars(no_gc, access_guard),
- to - boundary);
+ second_length);
} else {
- WriteToFlat(second, sink + boundary - from, 0, to - boundary,
- access_guard);
+ WriteToFlat(second, sink + boundary - start, 0, second_length,
+ cage_base, access_guard);
}
- to = boundary;
+ length -= second_length;
}
source = first;
}
- break;
+ if (length == 0) return;
+ continue;
}
case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString slice = SlicedString::cast(source);
unsigned offset = slice.offset();
- WriteToFlat(slice.parent(), sink, from + offset, to + offset,
- access_guard);
- return;
+ source = slice.parent(cage_base);
+ start += offset;
+ continue;
}
case kOneByteStringTag | kThinStringTag:
case kTwoByteStringTag | kThinStringTag:
- source = ThinString::cast(source).actual();
- break;
+ source = ThinString::cast(source).actual(cage_base);
+ continue;
}
+ UNREACHABLE();
}
- DCHECK_EQ(from, to);
+ UNREACHABLE();
}
template <typename SourceChar>
@@ -819,12 +830,15 @@ bool String::SlowEquals(
if (len != other.length()) return false;
if (len == 0) return true;
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
- if (this->IsThinString() || other.IsThinString()) {
- if (other.IsThinString()) other = ThinString::cast(other).actual();
- if (this->IsThinString()) {
- return ThinString::cast(*this).actual().Equals(other);
+ if (this->IsThinString(cage_base) || other.IsThinString(cage_base)) {
+ if (other.IsThinString(cage_base))
+ other = ThinString::cast(other).actual(cage_base);
+ if (this->IsThinString(cage_base)) {
+ return ThinString::cast(*this).actual(cage_base).Equals(other);
} else {
return this->Equals(other);
}
@@ -852,7 +866,9 @@ bool String::SlowEquals(
// We know the strings are both non-empty. Compare the first chars
// before we try to flatten the strings.
- if (this->Get(0, access_guard) != other.Get(0, access_guard)) return false;
+ if (this->Get(0, cage_base, access_guard) !=
+ other.Get(0, cage_base, access_guard))
+ return false;
if (IsSeqOneByteString() && other.IsSeqOneByteString()) {
const uint8_t* str1 =
@@ -1348,6 +1364,7 @@ namespace {
template <typename Char>
uint32_t HashString(String string, size_t start, int length, uint64_t seed,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
@@ -1358,14 +1375,15 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed,
std::unique_ptr<Char[]> buffer;
const Char* chars;
- if (string.IsConsString()) {
+ if (string.IsConsString(cage_base)) {
DCHECK_EQ(0, start);
DCHECK(!string.IsFlat());
buffer.reset(new Char[length]);
- String::WriteToFlat(string, buffer.get(), 0, length, access_guard);
+ String::WriteToFlat(string, buffer.get(), 0, length, cage_base,
+ access_guard);
chars = buffer.get();
} else {
- chars = string.GetChars<Char>(no_gc, access_guard) + start;
+ chars = string.GetChars<Char>(cage_base, no_gc, access_guard) + start;
}
return StringHasher::HashSequentialString<Char>(chars, length, seed);
@@ -1387,25 +1405,32 @@ uint32_t String::ComputeAndSetHash(
uint64_t seed = HashSeed(GetReadOnlyRoots());
size_t start = 0;
String string = *this;
- if (string.IsSlicedString()) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(string);
+ StringShape shape(string, cage_base);
+ if (shape.IsSliced()) {
SlicedString sliced = SlicedString::cast(string);
start = sliced.offset();
- string = sliced.parent();
+ string = sliced.parent(cage_base);
+ shape = StringShape(string, cage_base);
}
- if (string.IsConsString() && string.IsFlat()) {
- string = ConsString::cast(string).first();
+ if (shape.IsCons() && string.IsFlat(cage_base)) {
+ string = ConsString::cast(string).first(cage_base);
+ shape = StringShape(string, cage_base);
}
- if (string.IsThinString()) {
- string = ThinString::cast(string).actual();
+ if (shape.IsThin()) {
+ string = ThinString::cast(string).actual(cage_base);
+ shape = StringShape(string, cage_base);
if (length() == string.length()) {
set_raw_hash_field(string.raw_hash_field());
return hash();
}
}
uint32_t raw_hash_field =
- string.IsOneByteRepresentation()
- ? HashString<uint8_t>(string, start, length(), seed, access_guard)
- : HashString<uint16_t>(string, start, length(), seed, access_guard);
+ shape.encoding_tag() == kOneByteStringTag
+ ? HashString<uint8_t>(string, start, length(), seed, cage_base,
+ access_guard)
+ : HashString<uint16_t>(string, start, length(), seed, cage_base,
+ access_guard);
set_raw_hash_field(raw_hash_field);
// Check the hash code is there.
@@ -1453,6 +1478,13 @@ void String::PrintOn(FILE* file) {
}
}
+void String::PrintOn(std::ostream& ostream) {
+ int length = this->length();
+ for (int i = 0; i < length; i++) {
+ ostream.put(Get(i));
+ }
+}
+
Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
@@ -1502,29 +1534,30 @@ void SeqTwoByteString::clear_padding() {
}
uint16_t ConsString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
- if (second().length() == 0) {
- String left = first();
+ if (second(cage_base).length() == 0) {
+ String left = first(cage_base);
return left.Get(index);
}
String string = String::cast(*this);
while (true) {
- if (StringShape(string).IsCons()) {
+ if (StringShape(string, cage_base).IsCons()) {
ConsString cons_string = ConsString::cast(string);
String left = cons_string.first();
if (left.length() > index) {
string = left;
} else {
index -= left.length();
- string = cons_string.second();
+ string = cons_string.second(cage_base);
}
} else {
- return string.Get(index, access_guard);
+ return string.Get(index, cage_base, access_guard);
}
}
@@ -1532,13 +1565,15 @@ uint16_t ConsString::Get(
}
uint16_t ThinString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return actual().Get(index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return actual(cage_base).Get(index, cage_base, access_guard);
}
uint16_t SlicedString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return parent().Get(offset() + index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return parent(cage_base).Get(offset() + index, cage_base, access_guard);
}
int ExternalString::ExternalPayloadSize() const {
@@ -1705,30 +1740,39 @@ const byte* String::AddressOfCharacterAt(
int start_index, const DisallowGarbageCollection& no_gc) {
DCHECK(IsFlat());
String subject = *this;
- if (subject.IsConsString()) {
- subject = ConsString::cast(subject).first();
- } else if (subject.IsSlicedString()) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(subject);
+ StringShape shape(subject, cage_base);
+ if (subject.IsConsString(cage_base)) {
+ subject = ConsString::cast(subject).first(cage_base);
+ shape = StringShape(subject, cage_base);
+ } else if (subject.IsSlicedString(cage_base)) {
start_index += SlicedString::cast(subject).offset();
- subject = SlicedString::cast(subject).parent();
+ subject = SlicedString::cast(subject).parent(cage_base);
+ shape = StringShape(subject, cage_base);
}
- if (subject.IsThinString()) {
- subject = ThinString::cast(subject).actual();
+ if (subject.IsThinString(cage_base)) {
+ subject = ThinString::cast(subject).actual(cage_base);
+ shape = StringShape(subject, cage_base);
}
CHECK_LE(0, start_index);
CHECK_LE(start_index, subject.length());
- if (subject.IsSeqOneByteString()) {
- return reinterpret_cast<const byte*>(
- SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsSeqTwoByteString()) {
- return reinterpret_cast<const byte*>(
- SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsExternalOneByteString()) {
- return reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(subject).GetChars() + start_index);
- } else {
- DCHECK(subject.IsExternalTwoByteString());
- return reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(subject).GetChars() + start_index);
+ switch (shape.full_representation_tag()) {
+ case kOneByteStringTag | kSeqStringTag:
+ return reinterpret_cast<const byte*>(
+ SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
+ case kTwoByteStringTag | kSeqStringTag:
+ return reinterpret_cast<const byte*>(
+ SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
+ case kOneByteStringTag | kExternalStringTag:
+ return reinterpret_cast<const byte*>(
+ ExternalOneByteString::cast(subject).GetChars(cage_base) +
+ start_index);
+ case kTwoByteStringTag | kExternalStringTag:
+ return reinterpret_cast<const byte*>(
+ ExternalTwoByteString::cast(subject).GetChars(cage_base) +
+ start_index);
+ default:
+ UNREACHABLE();
}
}
@@ -1737,10 +1781,10 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
String source, uint8_t* sink, int from, int to);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
- String source, uint16_t* sink, int from, int to,
+ String source, uint16_t* sink, int from, int to, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
- String source, uint8_t* sink, int from, int to,
+ String source, uint8_t* sink, int from, int to, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
namespace {
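The WriteToFlat rewrite above switches from from/to bookkeeping to start/length and turns the cons-string walk into an explicit loop: the shorter arm of each ConsString is handled by a recursive call, while the longer arm is handled by adjusting start/length/sink and continuing the loop, keeping recursion depth bounded. A compact sketch of the same control flow over a hypothetical binary rope (stand-in Rope type, not V8's ConsString):

    #include <cstring>

    struct Rope {
      const char* leaf = nullptr;   // non-null: leaf holding `len` characters
      int len = 0;
      const Rope* first = nullptr;  // non-null pair for interior (cons) nodes
      const Rope* second = nullptr;
    };

    // Copies source[start .. start+length) into sink, iterating down the
    // longer child and recursing only into the shorter one.
    void WriteFlat(const Rope* source, char* sink, int start, int length) {
      while (length > 0) {
        if (source->leaf != nullptr) {
          std::memcpy(sink, source->leaf + start, static_cast<size_t>(length));
          return;
        }
        const int boundary = source->first->len;
        const int first_length = boundary - start;
        const int second_length = start + length - boundary;
        if (second_length >= first_length) {
          // Right side is longer: recurse over the left part, loop on right.
          if (first_length > 0) {
            WriteFlat(source->first, sink, start, first_length);
            sink += first_length;
            start = 0;
            length -= first_length;
          } else {
            start -= boundary;  // requested range starts inside `second`
          }
          source = source->second;
        } else {
          // Left side is longer: recurse over the right part, loop on left.
          if (second_length > 0) {
            WriteFlat(source->second, sink + first_length, 0, second_length);
            length -= second_length;
          }
          source = source->first;
        }
      }
    }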
diff --git a/chromium/v8/src/objects/string.h b/chromium/v8/src/objects/string.h
index 3bb3ba1d6e4..7a0166b7af8 100644
--- a/chromium/v8/src/objects/string.h
+++ b/chromium/v8/src/objects/string.h
@@ -10,11 +10,11 @@
#include "src/base/bits.h"
#include "src/base/export-template.h"
#include "src/base/strings.h"
+#include "src/common/globals.h"
#include "src/objects/instance-type.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -43,6 +43,7 @@ enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
class StringShape {
public:
inline explicit StringShape(const String s);
+ inline explicit StringShape(const String s, PtrComprCageBase cage_base);
inline explicit StringShape(Map s);
inline explicit StringShape(InstanceType t);
inline bool IsSequential() const;
@@ -183,12 +184,13 @@ class String : public TorqueGeneratedString<String, Name> {
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
template <typename Char>
- inline const Char* GetChars(const DisallowGarbageCollection& no_gc) const;
+ inline const Char* GetChars(PtrComprCageBase cage_base,
+ const DisallowGarbageCollection& no_gc) const;
// Get chars from sequential or external strings.
template <typename Char>
inline const Char* GetChars(
- const DisallowGarbageCollection& no_gc,
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const;
// Returns the address of the character at an offset into this string.
@@ -220,13 +222,15 @@ class String : public TorqueGeneratedString<String, Name> {
// to this method are not efficient unless the string is flat.
// If it is called from a background thread, the LocalIsolate version should
// be used.
- V8_INLINE uint16_t Get(int index, Isolate* isolate = nullptr) const;
+ V8_INLINE uint16_t Get(int index) const;
+ V8_INLINE uint16_t Get(int index, Isolate* isolate) const;
V8_INLINE uint16_t Get(int index, LocalIsolate* local_isolate) const;
// Method to pass down the access_guard. Useful for recursive calls such as
// ThinStrings where we go String::Get into ThinString::Get into String::Get
// again for the internalized string.
V8_INLINE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// ES6 section 7.1.3.1 ToNumber Applied to the String Type
static Handle<Object> ToNumber(Isolate* isolate, Handle<String> subject);
@@ -403,6 +407,7 @@ class String : public TorqueGeneratedString<String, Name> {
enum TrimMode { kTrim, kTrimStart, kTrimEnd };
V8_EXPORT_PRIVATE void PrintOn(FILE* out);
+ V8_EXPORT_PRIVATE void PrintOn(std::ostream& out);
// For use during stack traces. Performs rudimentary sanity check.
bool LooksValid();
@@ -428,6 +433,7 @@ class String : public TorqueGeneratedString<String, Name> {
DECL_VERIFIER(String)
inline bool IsFlat() const;
+ inline bool IsFlat(PtrComprCageBase cage_base) const;
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
@@ -473,6 +479,7 @@ class String : public TorqueGeneratedString<String, Name> {
static void WriteToFlat(String source, sinkchar* sink, int from, int to);
template <typename sinkchar>
static void WriteToFlat(String source, sinkchar* sink, int from, int to,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
static inline bool IsAscii(const char* chars, int length) {
@@ -550,7 +557,8 @@ class String : public TorqueGeneratedString<String, Name> {
// Implementation of the Get() public methods. Do not use directly.
V8_INLINE uint16_t
- GetImpl(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ GetImpl(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Implementation of the IsEqualTo() public methods. Do not use directly.
template <EqualityType kEqType, typename Char>
@@ -595,11 +603,13 @@ void String::WriteToFlat(String source, uint8_t* sink, int from, int to);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void String::WriteToFlat(String source, uint16_t* sink, int from, int to);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-void String::WriteToFlat(String source, uint8_t* sink, int from, int to ,
- const SharedStringAccessGuardIfNeeded&);
+void String::WriteToFlat(String source, uint8_t* sink, int from, int to,
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded&);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void String::WriteToFlat(String source, uint16_t* sink, int from, int to,
- const SharedStringAccessGuardIfNeeded&);
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded&);
// clang-format on
class SubStringRange {
@@ -649,7 +659,7 @@ class SeqOneByteString
// defined for convenience and it will check that the access guard is not
// needed.
inline uint8_t Get(int index) const;
- inline uint8_t Get(int index,
+ inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
inline void SeqOneByteStringSet(int index, uint16_t value);
@@ -697,7 +707,8 @@ class SeqTwoByteString
// Dispatched behavior.
inline uint16_t Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
inline void SeqTwoByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -755,7 +766,8 @@ class ConsString : public TorqueGeneratedConsString<ConsString, String> {
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -779,7 +791,8 @@ class ThinString : public TorqueGeneratedThinString<ThinString, String> {
DECL_GETTER(unchecked_actual, HeapObject)
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
DECL_VERIFIER(ThinString)
@@ -804,7 +817,8 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -887,10 +901,10 @@ class ExternalOneByteString
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint8_t* GetChars() const;
+ inline const uint8_t* GetChars(PtrComprCageBase cage_base) const;
// Dispatched behavior.
- inline uint8_t Get(int index,
+ inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
class BodyDescriptor;
@@ -930,11 +944,12 @@ class ExternalTwoByteString
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint16_t* GetChars() const;
+ inline const uint16_t* GetChars(PtrComprCageBase cage_base) const;
// Dispatched behavior.
inline uint16_t Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
diff --git a/chromium/v8/src/objects/string.tq b/chromium/v8/src/objects/string.tq
index 9ab35d1e001..68c280de3a5 100644
--- a/chromium/v8/src/objects/string.tq
+++ b/chromium/v8/src/objects/string.tq
@@ -128,7 +128,7 @@ type DirectString extends String;
macro AllocateNonEmptySeqOneByteString<Iterator: type>(
length: uint32, content: Iterator): SeqOneByteString {
- assert(length != 0 && length <= kStringMaxLength);
+ dcheck(length != 0 && length <= kStringMaxLength);
return new SeqOneByteString{
map: kOneByteStringMap,
raw_hash_field: kNameEmptyHashField,
@@ -139,7 +139,7 @@ macro AllocateNonEmptySeqOneByteString<Iterator: type>(
macro AllocateNonEmptySeqTwoByteString<Iterator: type>(
length: uint32, content: Iterator): SeqTwoByteString {
- assert(length > 0 && length <= kStringMaxLength);
+ dcheck(length > 0 && length <= kStringMaxLength);
return new SeqTwoByteString{
map: kStringMap,
raw_hash_field: kNameEmptyHashField,
@@ -177,8 +177,10 @@ macro AllocateSeqTwoByteString(length: uint32): SeqTwoByteString|EmptyString {
return AllocateSeqTwoByteString(length, UninitializedIterator{});
}
-extern macro StringWriteToFlatOneByte(String, RawPtr<char8>, int32, int32);
-extern macro StringWriteToFlatTwoByte(String, RawPtr<char16>, int32, int32);
+extern macro StringWriteToFlatOneByte(
+ String, RawPtr<char8>, int32, int32): void;
+extern macro StringWriteToFlatTwoByte(
+ String, RawPtr<char16>, int32, int32): void;
// Corresponds to String::SlowFlatten in the C++ runtime.
builtin StringSlowFlatten(cons: ConsString): String {
@@ -222,7 +224,7 @@ macro Flatten(string: String): String {
return Flatten(cons);
}
case (thin: ThinString): {
- assert(!Is<ConsString>(thin.actual));
+ dcheck(!Is<ConsString>(thin.actual));
return thin.actual;
}
case (other: String): {
diff --git a/chromium/v8/src/objects/struct.h b/chromium/v8/src/objects/struct.h
index 2cc51c8544b..41a4b2b481d 100644
--- a/chromium/v8/src/objects/struct.h
+++ b/chromium/v8/src/objects/struct.h
@@ -69,9 +69,6 @@ class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
inline bool Equals(Object getter_value, Object setter_value);
- // Dispatched behavior.
- DECL_PRINTER(AccessorPair)
-
TQ_OBJECT_CONSTRUCTORS(AccessorPair)
};
@@ -79,7 +76,6 @@ class ClassPositions
: public TorqueGeneratedClassPositions<ClassPositions, Struct> {
public:
// Dispatched behavior.
- DECL_PRINTER(ClassPositions)
void BriefPrintDetails(std::ostream& os);
TQ_OBJECT_CONSTRUCTORS(ClassPositions)
diff --git a/chromium/v8/src/objects/struct.tq b/chromium/v8/src/objects/struct.tq
index ec9782bab03..9c87663fd22 100644
--- a/chromium/v8/src/objects/struct.tq
+++ b/chromium/v8/src/objects/struct.tq
@@ -3,11 +3,9 @@
// found in the LICENSE file.
@abstract
-@generatePrint
extern class Struct extends HeapObject {
}
-@generatePrint
extern class Tuple2 extends Struct {
value1: Object;
value2: Object;
diff --git a/chromium/v8/src/objects/swiss-hash-table-helpers.tq b/chromium/v8/src/objects/swiss-hash-table-helpers.tq
index 627fde72979..0d8543d5d13 100644
--- a/chromium/v8/src/objects/swiss-hash-table-helpers.tq
+++ b/chromium/v8/src/objects/swiss-hash-table-helpers.tq
@@ -31,7 +31,7 @@ extern macro LoadSwissNameDictionaryCtrlTableGroup(intptr): uint64;
// Counterpart to swiss_table::ProbeSequence in C++ implementation.
struct ProbeSequence {
- macro Next() {
+ macro Next(): void {
this.index = this.index + Unsigned(FromConstexpr<int32>(kGroupWidth));
this.offset = (this.offset + this.index) & this.mask;
}
@@ -64,7 +64,7 @@ struct ByteMask {
}
// Counterpart to operator++() in C++ version.
- macro ClearLowestSetBit() {
+ macro ClearLowestSetBit(): void {
this.mask = ClearLowestSetBit<uint64>(this.mask);
}
@@ -83,7 +83,7 @@ struct BitMask {
}
// Counterpart to operator++() in C++ version.
- macro ClearLowestSetBit() {
+ macro ClearLowestSetBit(): void {
this.mask = ClearLowestSetBit<uint32>(this.mask);
}
diff --git a/chromium/v8/src/objects/swiss-name-dictionary.tq b/chromium/v8/src/objects/swiss-name-dictionary.tq
index 803014448ec..c1c1d406160 100644
--- a/chromium/v8/src/objects/swiss-name-dictionary.tq
+++ b/chromium/v8/src/objects/swiss-name-dictionary.tq
@@ -4,7 +4,6 @@
#include 'src/objects/swiss-name-dictionary.h'
-@noVerifier
@doNotGenerateCppClass
extern class SwissNameDictionary extends HeapObject {
hash: uint32;
@@ -32,13 +31,13 @@ const kNotFoundSentinel:
extern macro LoadSwissNameDictionaryKey(SwissNameDictionary, intptr): Name;
extern macro StoreSwissNameDictionaryKeyAndValue(
- SwissNameDictionary, intptr, Object, Object);
+ SwissNameDictionary, intptr, Object, Object): void;
extern macro SwissNameDictionarySetCtrl(
- SwissNameDictionary, intptr, intptr, uint8);
+ SwissNameDictionary, intptr, intptr, uint8): void;
extern macro StoreSwissNameDictionaryPropertyDetails(
- SwissNameDictionary, intptr, intptr, uint8);
+ SwissNameDictionary, intptr, intptr, uint8): void;
extern macro
SwissNameDictionaryIncreaseElementCountOrBailout(
@@ -46,7 +45,7 @@ SwissNameDictionaryIncreaseElementCountOrBailout(
extern macro
StoreSwissNameDictionaryEnumToEntryMapping(
- SwissNameDictionary, intptr, intptr, int32);
+ SwissNameDictionary, intptr, intptr, int32): void;
extern macro
SwissNameDictionaryUpdateCountsForDeletion(ByteArray, intptr): uint32;
@@ -70,10 +69,10 @@ macro SwissNameDictionaryCapacityFor(atLeastSpaceFor: intptr): intptr {
} else if (atLeastSpaceFor < kSwissNameDictionaryInitialCapacity) {
return 4;
} else if (FromConstexpr<bool>(kGroupWidth == 16)) {
- assert(atLeastSpaceFor == 4);
+ dcheck(atLeastSpaceFor == 4);
return 4;
} else if (FromConstexpr<bool>(kGroupWidth == 8)) {
- assert(atLeastSpaceFor == 4);
+ dcheck(atLeastSpaceFor == 4);
return 8;
}
}
@@ -85,7 +84,7 @@ macro SwissNameDictionaryCapacityFor(atLeastSpaceFor: intptr): intptr {
// Counterpart for SwissNameDictionary::MaxUsableCapacity in C++.
@export
macro SwissNameDictionaryMaxUsableCapacity(capacity: intptr): intptr {
- assert(capacity == 0 || capacity >= kSwissNameDictionaryInitialCapacity);
+ dcheck(capacity == 0 || capacity >= kSwissNameDictionaryInitialCapacity);
if (FromConstexpr<bool>(kGroupWidth == 8) && capacity == 4) {
// If the group size is 16 we can fully utilize capacity 4: There will be
// enough kEmpty entries in the ctrl table.
@@ -147,7 +146,7 @@ macro SwissNameDictionaryCtrlTableStartOffsetMT(capacity: intptr): intptr {
macro Probe(hash: uint32, mask: uint32): ProbeSequence {
// Mask must be a power of 2 minus 1.
- assert(((mask + 1) & mask) == 0);
+ dcheck(((mask + 1) & mask) == 0);
return ProbeSequence{mask: mask, offset: H1(hash) & mask, index: 0};
}
@@ -215,8 +214,7 @@ macro FindFirstEmpty<GroupLoader: type>(
macro Add<GroupLoader: type>(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8)
- labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
const capacity: intptr = Convert<intptr>(table.capacity);
const maxUsable: uint32 =
Unsigned(Convert<int32>(SwissNameDictionaryMaxUsableCapacity(capacity)));
@@ -250,9 +248,8 @@ macro Add<GroupLoader: type>(
}
@export
-macro SwissNameDictionaryDelete(table: SwissNameDictionary, entry: intptr)
- labels
- Shrunk(SwissNameDictionary) {
+macro SwissNameDictionaryDelete(table: SwissNameDictionary, entry: intptr):
+ void labels Shrunk(SwissNameDictionary) {
const capacity = Convert<intptr>(table.capacity);
// Update present and deleted element counts at once, without needing to do
@@ -305,7 +302,7 @@ Found(intptr),
@export
macro SwissNameDictionaryAddSIMD(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8) labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
Add<GroupSse2Loader>(table, key, value, propertyDetails)
otherwise Bailout;
}
@@ -313,7 +310,7 @@ macro SwissNameDictionaryAddSIMD(
@export
macro SwissNameDictionaryAddPortable(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8) labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
Add<GroupPortableLoader>(table, key, value, propertyDetails)
otherwise Bailout;
}
diff --git a/chromium/v8/src/objects/synthetic-module.h b/chromium/v8/src/objects/synthetic-module.h
index a8b79fb0a00..cad81b39647 100644
--- a/chromium/v8/src/objects/synthetic-module.h
+++ b/chromium/v8/src/objects/synthetic-module.h
@@ -24,7 +24,6 @@ class SyntheticModule
public:
NEVER_READ_ONLY_SPACE
DECL_VERIFIER(SyntheticModule)
- DECL_PRINTER(SyntheticModule)
// Set module's exported value for the specified export_name to the specified
// export_value. An error will be thrown if export_name is not one
diff --git a/chromium/v8/src/objects/tagged-impl.h b/chromium/v8/src/objects/tagged-impl.h
index e7278a12451..6b01c6fe628 100644
--- a/chromium/v8/src/objects/tagged-impl.h
+++ b/chromium/v8/src/objects/tagged-impl.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_TAGGED_IMPL_H_
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/common/globals.h"
namespace v8 {
diff --git a/chromium/v8/src/objects/template-objects.tq b/chromium/v8/src/objects/template-objects.tq
index 2aa657977ff..63260bfd9c2 100644
--- a/chromium/v8/src/objects/template-objects.tq
+++ b/chromium/v8/src/objects/template-objects.tq
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class CachedTemplateObject extends Struct {
slot_id: Smi;
template_object: JSArray;
next: CachedTemplateObject|TheHole;
}
-@generatePrint
extern class TemplateObjectDescription extends Struct {
raw_strings: FixedArray;
cooked_strings: FixedArray;
diff --git a/chromium/v8/src/objects/templates.tq b/chromium/v8/src/objects/templates.tq
index 9406f62d7a9..a3bb7a9e357 100644
--- a/chromium/v8/src/objects/templates.tq
+++ b/chromium/v8/src/objects/templates.tq
@@ -11,7 +11,6 @@ extern class TemplateInfo extends Struct {
property_accessors: TemplateList|Undefined;
}
-@generatePrint
extern class FunctionTemplateRareData extends Struct {
// See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
prototype_template: ObjectTemplateInfo|Undefined;
diff --git a/chromium/v8/src/objects/transitions-inl.h b/chromium/v8/src/objects/transitions-inl.h
index e842e5ae660..91cc9060132 100644
--- a/chromium/v8/src/objects/transitions-inl.h
+++ b/chromium/v8/src/objects/transitions-inl.h
@@ -326,7 +326,8 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
case kWeakRef: {
Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
PropertyDetails details = GetSimpleTargetDetails(target);
- if (details.location() != kField) return Handle<String>::null();
+ if (details.location() != PropertyLocation::kField)
+ return Handle<String>::null();
DCHECK_EQ(kData, details.kind());
if (details.attributes() != NONE) return Handle<String>::null();
Name name = GetSimpleTransitionKey(target);
diff --git a/chromium/v8/src/objects/transitions.cc b/chromium/v8/src/objects/transitions.cc
index 2bc8cf8697d..0e76dc4e1b1 100644
--- a/chromium/v8/src/objects/transitions.cc
+++ b/chromium/v8/src/objects/transitions.cc
@@ -270,7 +270,8 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
PropertyDetails details = target.GetLastDescriptorDetails(isolate_);
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
- if (requested_location == kFieldOnly && details.location() != kField) {
+ if (requested_location == kFieldOnly &&
+ details.location() != PropertyLocation::kField) {
return MaybeHandle<Map>();
}
return Handle<Map>(target, isolate_);
diff --git a/chromium/v8/src/objects/value-serializer.cc b/chromium/v8/src/objects/value-serializer.cc
index 53bb0cf9273..a8c78404c40 100644
--- a/chromium/v8/src/objects/value-serializer.cc
+++ b/chromium/v8/src/objects/value-serializer.cc
@@ -6,13 +6,16 @@
#include <type_traits>
+#include "include/v8-maybe.h"
#include "include/v8-value-serializer-version.h"
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/heap/factory.h"
@@ -618,7 +621,8 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
Handle<Object> value;
if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
- if (V8_LIKELY(!map_changed && details.location() == kField)) {
+ if (V8_LIKELY(!map_changed &&
+ details.location() == PropertyLocation::kField)) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
value = JSObject::FastPropertyAt(object, details.representation(),
@@ -802,8 +806,8 @@ Maybe<bool> ValueSerializer::WriteJSPrimitiveWrapper(
void ValueSerializer::WriteJSRegExp(Handle<JSRegExp> regexp) {
WriteTag(SerializationTag::kRegExp);
- WriteString(handle(regexp->Pattern(), isolate_));
- WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
+ WriteString(handle(regexp->source(), isolate_));
+ WriteVarint(static_cast<uint32_t>(regexp->flags()));
}
Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
@@ -1117,7 +1121,7 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
: isolate_(isolate),
delegate_(delegate),
position_(data.begin()),
- end_(data.begin() + data.length()),
+ end_(data.end()),
id_map_(isolate->global_handles()->Create(
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
diff --git a/chromium/v8/src/objects/value-serializer.h b/chromium/v8/src/objects/value-serializer.h
index 8a381d1691f..c6363e67c63 100644
--- a/chromium/v8/src/objects/value-serializer.h
+++ b/chromium/v8/src/objects/value-serializer.h
@@ -8,7 +8,7 @@
#include <cstdint>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/strings.h"
diff --git a/chromium/v8/src/objects/visitors-inl.h b/chromium/v8/src/objects/visitors-inl.h
new file mode 100644
index 00000000000..25186ac7f9a
--- /dev/null
+++ b/chromium/v8/src/objects/visitors-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITORS_INL_H_
+#define V8_OBJECTS_VISITORS_INL_H_
+
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/objects/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(
+ PtrComprCageBase cage_base, PtrComprCageBase code_cage_base)
+#if V8_COMPRESS_POINTERS
+ : cage_base_(cage_base)
+#ifdef V8_EXTERNAL_CODE_SPACE
+ ,
+ code_cage_base_(code_cage_base)
+#endif // V8_EXTERNAL_CODE_SPACE
+#endif // V8_COMPRESS_POINTERS
+{
+}
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(Isolate* isolate)
+#if V8_COMPRESS_POINTERS
+ : ObjectVisitorWithCageBases(PtrComprCageBase(isolate->cage_base()),
+ PtrComprCageBase(isolate->code_cage_base()))
+#else
+ : ObjectVisitorWithCageBases(PtrComprCageBase(), PtrComprCageBase())
+#endif // V8_COMPRESS_POINTERS
+{
+}
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(Heap* heap)
+ : ObjectVisitorWithCageBases(Isolate::FromHeap(heap)) {}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_VISITORS_INL_H_
diff --git a/chromium/v8/src/objects/visitors.h b/chromium/v8/src/objects/visitors.h
index a784cec7561..f065bb71475 100644
--- a/chromium/v8/src/objects/visitors.h
+++ b/chromium/v8/src/objects/visitors.h
@@ -168,12 +168,50 @@ class ObjectVisitor {
virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
// Visits the relocation info using the given iterator.
- virtual void VisitRelocInfo(RelocIterator* it);
+ void VisitRelocInfo(RelocIterator* it);
// Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
};
+// Helper version of ObjectVisitor that also takes care of caching base values
+// of the main pointer compression cage and for the code cage.
+class ObjectVisitorWithCageBases : public ObjectVisitor {
+ public:
+ inline ObjectVisitorWithCageBases(PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base);
+ inline explicit ObjectVisitorWithCageBases(Isolate* isolate);
+ inline explicit ObjectVisitorWithCageBases(Heap* heap);
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
+ // The pointer compression cage base value used for decompression of
+ // references to Code objects.
+ PtrComprCageBase code_cage_base() const {
+#if V8_EXTERNAL_CODE_SPACE
+ return code_cage_base_;
+#else
+ return cage_base();
+#endif // V8_EXTERNAL_CODE_SPACE
+ }
+
+ private:
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#ifdef V8_EXTERNAL_CODE_SPACE
+ const PtrComprCageBase code_cage_base_;
+#endif // V8_EXTERNAL_CODE_SPACE
+#endif // V8_COMPRESS_POINTERS
+};
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/parsing/parse-info.h b/chromium/v8/src/parsing/parse-info.h
index c6bcb221ea8..57153c345bc 100644
--- a/chromium/v8/src/parsing/parse-info.h
+++ b/chromium/v8/src/parsing/parse-info.h
@@ -9,7 +9,6 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
#include "src/base/bit-field.h"
#include "src/base/export-template.h"
#include "src/base/logging.h"
diff --git a/chromium/v8/src/parsing/parser-base.h b/chromium/v8/src/parsing/parser-base.h
index 108b11edc83..8093472eebe 100644
--- a/chromium/v8/src/parsing/parser-base.h
+++ b/chromium/v8/src/parsing/parser-base.h
@@ -27,12 +27,15 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp.h"
#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
+class PreParserIdentifier;
+
enum FunctionNameValidity {
kFunctionNameIsStrictReserved,
kSkipFunctionNameCheck,
@@ -1074,22 +1077,24 @@ class ParserBase {
}
// Report syntax errors.
- V8_NOINLINE void ReportMessage(MessageTemplate message) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message,
- static_cast<const char*>(nullptr));
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessage(MessageTemplate message, const Ts&... args) {
+ ReportMessageAt(scanner()->location(), message, args...);
}
- template <typename T>
- V8_NOINLINE void ReportMessage(MessageTemplate message, T arg) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message, arg);
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message, const Ts&... args) {
+ impl()->pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, args...);
+ scanner()->set_parser_error();
}
- V8_NOINLINE void ReportMessageAt(Scanner::Location location,
- MessageTemplate message) {
- impl()->ReportMessageAt(location, message,
- static_cast<const char*>(nullptr));
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message,
+ const PreParserIdentifier& arg0) {
+ ReportMessageAt(source_location, message,
+ impl()->PreParserIdentifierToAstRawString(arg0));
}
V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
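The ReportMessage/ReportMessageAt hunk above collapses the former zero- and one-argument overloads into a variadic template that forwards its arguments to the pending error handler and marks the parse as failed; only the PreParserIdentifier case keeps a dedicated overload so the argument can be converted to an AstRawString first. A stripped-down sketch of that forwarding shape, with hypothetical Handler/Location types rather than the V8 classes:

    struct Location { int beg_pos = 0; int end_pos = 0; };

    struct Handler {
      // Stand-in for PendingCompilationErrorHandler::ReportMessageAt.
      template <typename... Ts>
      void ReportMessageAt(int beg, int end, int message, const Ts&... args) {
        (void)beg; (void)end; (void)message; (void)sizeof...(args);
      }
    };

    struct Reporter {
      Handler handler;
      bool parser_error = false;

      // One variadic entry point replaces the per-arity overloads.
      template <typename... Ts>
      void ReportMessageAt(Location loc, int message, const Ts&... args) {
        handler.ReportMessageAt(loc.beg_pos, loc.end_pos, message, args...);
        parser_error = true;  // mirrors scanner()->set_parser_error()
      }

      template <typename... Ts>
      void ReportMessage(int message, const Ts&... args) {
        ReportMessageAt(Location{}, message, args...);
      }
    };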
@@ -1122,6 +1127,12 @@ class ParserBase {
}
V8_INLINE IdentifierT ParseAndClassifyIdentifier(Token::Value token);
+
+ // Similar logic to ParseAndClassifyIdentifier but the identifier is
+ // already parsed in prop_info. Returns false if this is an invalid
+ // identifier or an invalid use of the "arguments" keyword.
+ V8_INLINE bool ClassifyPropertyIdentifier(Token::Value token,
+ ParsePropertyInfo* prop_info);
// Parses an identifier or a strict mode future reserved word. Allows passing
// in function_kind for the case of parsing the identifier in a function
// expression, where the relevant "function_kind" bit is of the function being
@@ -1140,6 +1151,11 @@ class ParserBase {
ExpressionT ParsePropertyOrPrivatePropertyName();
+ const AstRawString* GetNextSymbolForRegExpLiteral() const {
+ return scanner()->NextSymbol(ast_value_factory());
+ }
+ bool ValidateRegExpLiteral(const AstRawString* pattern, RegExpFlags flags,
+ RegExpError* regexp_error);
ExpressionT ParseRegExpLiteral();
ExpressionT ParseBindingPattern();
@@ -1634,8 +1650,39 @@ void ParserBase<Impl>::ReportUnexpectedToken(Token::Value token) {
}
template <typename Impl>
+bool ParserBase<Impl>::ClassifyPropertyIdentifier(
+ Token::Value next, ParsePropertyInfo* prop_info) {
+ // Updates made here must be reflected on ParseAndClassifyIdentifier.
+ if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
+ if (V8_UNLIKELY(impl()->IsArguments(prop_info->name) &&
+ scope()->ShouldBanArguments())) {
+ ReportMessage(
+ MessageTemplate::kArgumentsDisallowedInInitializerAndStaticBlock);
+ return false;
+ }
+ return true;
+ }
+
+ if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
+ is_await_as_identifier_disallowed())) {
+ ReportUnexpectedToken(next);
+ return false;
+ }
+
+ DCHECK(!prop_info->is_computed_name);
+
+ if (next == Token::AWAIT) {
+ DCHECK(!is_async_function());
+ expression_scope()->RecordAsyncArrowParametersError(
+ scanner()->peek_location(), MessageTemplate::kAwaitBindingIdentifier);
+ }
+ return true;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::IdentifierT
ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
+ // Updates made here must be reflected on ClassifyPropertyIdentifier.
DCHECK_EQ(scanner()->current_token(), next);
if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
IdentifierT name = impl()->GetIdentifier();
@@ -1746,6 +1793,26 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
}
template <typename Impl>
+bool ParserBase<Impl>::ValidateRegExpLiteral(const AstRawString* pattern,
+ RegExpFlags flags,
+ RegExpError* regexp_error) {
+ // TODO(jgruber): If already validated in the preparser, skip validation in
+ // the parser.
+ DisallowGarbageCollection no_gc;
+ ZoneScope zone_scope(zone()); // Free regexp parser memory after use.
+ const unsigned char* d = pattern->raw_data();
+ if (pattern->is_one_byte()) {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ static_cast<const uint8_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ } else {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ reinterpret_cast<const uint16_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ }
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
int pos = peek_position();
if (!scanner()->ScanRegExpPattern()) {
@@ -1754,15 +1821,22 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
return impl()->FailureExpression();
}
- IdentifierT js_pattern = impl()->GetNextSymbol();
- Maybe<int> flags = scanner()->ScanRegExpFlags();
- if (flags.IsNothing()) {
+ const AstRawString* js_pattern = GetNextSymbolForRegExpLiteral();
+ base::Optional<RegExpFlags> flags = scanner()->ScanRegExpFlags();
+ if (!flags.has_value()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
return impl()->FailureExpression();
}
Next();
- return factory()->NewRegExpLiteral(js_pattern, flags.FromJust(), pos);
+ RegExpError regexp_error;
+ if (!ValidateRegExpLiteral(js_pattern, flags.value(), &regexp_error)) {
+ if (RegExpErrorIsStackOverflow(regexp_error)) set_stack_overflow();
+ ReportMessage(MessageTemplate::kMalformedRegExp, js_pattern,
+ RegExpErrorString(regexp_error));
+ return impl()->FailureExpression();
+ }
+ return factory()->NewRegExpLiteral(js_pattern, flags.value(), pos);
}
template <typename Impl>
@@ -2514,7 +2588,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
IdentifierT name = prop_info->name;
ParseFunctionFlags function_flags = prop_info->function_flags;
- ParsePropertyKind kind = prop_info->kind;
switch (prop_info->kind) {
case ParsePropertyKind::kSpread:
@@ -2562,19 +2635,10 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
- is_await_as_identifier_disallowed())) {
- ReportUnexpectedToken(Next());
+ if (!ClassifyPropertyIdentifier(name_token, prop_info)) {
return impl()->NullLiteralProperty();
}
- DCHECK(!prop_info->is_computed_name);
-
- if (name_token == Token::AWAIT) {
- DCHECK(!is_async_function());
- expression_scope()->RecordAsyncArrowParametersError(
- next_loc, MessageTemplate::kAwaitBindingIdentifier);
- }
ExpressionT lhs =
impl()->ExpressionFromIdentifier(name, next_loc.beg_pos);
if (!IsAssignableIdentifier(lhs)) {
@@ -2637,7 +2701,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
case ParsePropertyKind::kAccessorGetter:
case ParsePropertyKind::kAccessorSetter: {
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- bool is_get = kind == ParsePropertyKind::kAccessorGetter;
+ bool is_get = prop_info->kind == ParsePropertyKind::kAccessorGetter;
expression_scope()->RecordPatternError(
Scanner::Location(next_loc.beg_pos, end_position()),
@@ -3170,20 +3234,21 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
int prec) {
DCHECK_GE(prec, 4);
- ExpressionT x;
+
// "#foo in ShiftExpression" needs to be parsed separately, since private
// identifiers are not valid PrimaryExpressions.
if (V8_UNLIKELY(FLAG_harmony_private_brand_checks &&
peek() == Token::PRIVATE_NAME)) {
- x = ParsePropertyOrPrivatePropertyName();
- if (peek() != Token::IN) {
- ReportUnexpectedToken(peek());
+ ExpressionT x = ParsePropertyOrPrivatePropertyName();
+ int prec1 = Token::Precedence(peek(), accept_IN_);
+ if (peek() != Token::IN || prec1 < prec) {
+ ReportUnexpectedToken(Token::PRIVATE_NAME);
return impl()->FailureExpression();
}
- } else {
- x = ParseUnaryExpression();
+ return ParseBinaryContinuation(x, prec, prec1);
}
+ ExpressionT x = ParseUnaryExpression();
int prec1 = Token::Precedence(peek(), accept_IN_);
if (prec1 >= prec) {
return ParseBinaryContinuation(x, prec, prec1);
@@ -5258,7 +5323,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
body->set_scope(scope()->FinalizeBlockScope());
}
- body->InitializeStatements(statements, zone_);
+ body->InitializeStatements(statements, zone());
return body;
}
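
The ValidateRegExpLiteral() addition above verifies a regexp pattern's syntax at parse time, dispatching on the string's encoding before handing the raw characters to the regexp parser. A minimal standalone sketch of that encoding dispatch, with stand-in Verify() overloads (illustrative only, not part of this diff and not V8's actual API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in verifiers; the real code hands the characters to the regexp
    // parser. One overload per character width.
    bool Verify(const uint8_t* chars, size_t length) {
      return chars != nullptr && length > 0;  // placeholder check only
    }
    bool Verify(const uint16_t* chars, size_t length) {
      return chars != nullptr && length > 0;  // placeholder check only
    }

    // A pattern stored either as one-byte (Latin-1) or two-byte (UC16) data.
    struct Pattern {
      bool is_one_byte;
      std::vector<uint8_t> one_byte;
      std::vector<uint16_t> two_byte;

      bool VerifySyntax() const {
        // Pick the overload matching the stored encoding, mirroring the
        // one-byte/two-byte branch in ValidateRegExpLiteral above.
        return is_one_byte ? Verify(one_byte.data(), one_byte.size())
                           : Verify(two_byte.data(), two_byte.size());
      }
    };
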
diff --git a/chromium/v8/src/parsing/parser.h b/chromium/v8/src/parsing/parser.h
index 6b50ed134c3..c5cc0c80307 100644
--- a/chromium/v8/src/parsing/parser.h
+++ b/chromium/v8/src/parsing/parser.h
@@ -701,25 +701,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
  // Dummy implementation. The parser should never have an unidentifiable
  // error.
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const AstRawString* arg) {
return arg;
}
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.cc b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
index 60bc8ada278..4756628ca70 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
@@ -19,49 +19,53 @@ namespace internal {
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, Isolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = string;
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = string;
}
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, LocalIsolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = isolate->heap()->NewPersistentHandle(string);
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = isolate->heap()->NewPersistentHandle(string);
}
template <typename IsolateT>
void PendingCompilationErrorHandler::MessageDetails::Prepare(
IsolateT* isolate) {
- switch (type_) {
- case kAstRawString:
- return SetString(arg_->string(), isolate);
-
- case kNone:
- case kConstCharString:
- // We can delay allocation until ArgumentString(isolate).
- // TODO(leszeks): We don't actually have to transfer this string, since
- // it's a root.
- return;
-
- case kMainThreadHandle:
- // The message details might already be prepared, so skip them if this is
- // the case.
- return;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ switch (args_[i].type) {
+ case kAstRawString:
+ return SetString(args_[i].ast_string->string(), isolate);
+
+ case kNone:
+ case kConstCharString:
+ // We can delay allocation until ArgString(isolate).
+ return;
+
+ case kMainThreadHandle:
+ // The message details might already be prepared, so skip them if this
+ // is the case.
+ return;
+ }
}
}
-Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
- Isolate* isolate) const {
- switch (type_) {
+Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgString(
+ Isolate* isolate, int index) const {
+ // `index` may be >= argc; in that case we return a default value to pass on
+ // elsewhere.
+ DCHECK_LT(index, kMaxArgumentCount);
+ switch (args_[index].type) {
case kMainThreadHandle:
- return arg_handle_;
+ return args_[index].js_string;
case kNone:
- return isolate->factory()->undefined_string();
+ return Handle<String>::null();
case kConstCharString:
return isolate->factory()
- ->NewStringFromUtf8(base::CStrVector(char_arg_), AllocationType::kOld)
+ ->NewStringFromUtf8(base::CStrVector(args_[index].c_string),
+ AllocationType::kOld)
.ToHandleChecked();
case kAstRawString:
UNREACHABLE();
@@ -93,6 +97,17 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
error_details_ = MessageDetails(start_position, end_position, message, arg);
}
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const AstRawString* arg0,
+ const char* arg1) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ error_details_ =
+ MessageDetails(start_position, end_position, message, arg0, arg1);
+}
+
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
int end_position,
MessageTemplate message,
@@ -119,7 +134,8 @@ void PendingCompilationErrorHandler::ReportWarnings(
for (const MessageDetails& warning : warning_messages_) {
MessageLocation location = warning.GetLocation(script);
- Handle<String> argument = warning.ArgumentString(isolate);
+ Handle<String> argument = warning.ArgString(isolate, 0);
+ DCHECK_LT(warning.ArgCount(), 2); // Arg1 is only used for errors.
Handle<JSMessageObject> message =
MessageHandler::MakeMessageObject(isolate, warning.message(), &location,
argument, Handle<FixedArray>::null());
@@ -160,12 +176,13 @@ void PendingCompilationErrorHandler::ThrowPendingError(
if (!has_pending_error_) return;
MessageLocation location = error_details_.GetLocation(script);
- Handle<String> argument = error_details_.ArgumentString(isolate);
+ Handle<String> arg0 = error_details_.ArgString(isolate, 0);
+ Handle<String> arg1 = error_details_.ArgString(isolate, 1);
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
Handle<JSObject> error =
- factory->NewSyntaxError(error_details_.message(), argument);
+ factory->NewSyntaxError(error_details_.message(), arg0, arg1);
isolate->ThrowAt(error, &location);
}
@@ -173,7 +190,8 @@ Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
Isolate* isolate) {
error_details_.Prepare(isolate);
return MessageFormatter::Format(isolate, error_details_.message(),
- error_details_.ArgumentString(isolate));
+ error_details_.ArgString(isolate, 0),
+ error_details_.ArgString(isolate, 1));
}
} // namespace internal
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.h b/chromium/v8/src/parsing/pending-compilation-error-handler.h
index 31e765d5145..9384e94df70 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.h
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.h
@@ -25,9 +25,7 @@ class Script;
// compilation phases.
class PendingCompilationErrorHandler {
public:
- PendingCompilationErrorHandler()
- : has_pending_error_(false), stack_overflow_(false) {}
-
+ PendingCompilationErrorHandler() = default;
PendingCompilationErrorHandler(const PendingCompilationErrorHandler&) =
delete;
PendingCompilationErrorHandler& operator=(
@@ -39,6 +37,10 @@ class PendingCompilationErrorHandler {
void ReportMessageAt(int start_position, int end_position,
MessageTemplate message, const AstRawString* arg);
+ void ReportMessageAt(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1);
+
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -85,24 +87,45 @@ class PendingCompilationErrorHandler {
MessageDetails()
: start_position_(-1),
end_position_(-1),
- message_(MessageTemplate::kNone),
- type_(kNone) {}
+ message_(MessageTemplate::kNone) {}
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg)
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- arg_(arg),
- type_(arg ? kAstRawString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{arg1}} {
+ DCHECK_NOT_NULL(arg0);
+ DCHECK_NOT_NULL(arg1);
+ }
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const char* char_arg)
+ MessageTemplate message, const char* arg0)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- char_arg_(char_arg),
- type_(char_arg_ ? kConstCharString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
+
+ Handle<String> ArgString(Isolate* isolate, int index) const;
+ int ArgCount() const {
+ int argc = 0;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ if (args_[i].type == kNone) break;
+ argc++;
+ }
+#ifdef DEBUG
+ for (int i = argc; i < kMaxArgumentCount; i++) {
+ DCHECK_EQ(args_[i].type, kNone);
+ }
+#endif // DEBUG
+ return argc;
+ }
- Handle<String> ArgumentString(Isolate* isolate) const;
MessageLocation GetLocation(Handle<Script> script) const;
MessageTemplate message() const { return message_; }
@@ -117,19 +140,32 @@ class PendingCompilationErrorHandler {
int start_position_;
int end_position_;
+
MessageTemplate message_;
- union {
- const AstRawString* arg_;
- const char* char_arg_;
- Handle<String> arg_handle_;
+
+ struct MessageArgument final {
+ constexpr MessageArgument() : ast_string(nullptr), type(kNone) {}
+ explicit constexpr MessageArgument(const AstRawString* s)
+ : ast_string(s), type(s == nullptr ? kNone : kAstRawString) {}
+ explicit constexpr MessageArgument(const char* s)
+ : c_string(s), type(s == nullptr ? kNone : kConstCharString) {}
+
+ union {
+ const AstRawString* ast_string;
+ const char* c_string;
+ Handle<String> js_string;
+ };
+ Type type;
};
- Type type_;
+
+ static constexpr int kMaxArgumentCount = 2;
+ MessageArgument args_[kMaxArgumentCount];
};
void ThrowPendingError(Isolate* isolate, Handle<Script> script) const;
- bool has_pending_error_;
- bool stack_overflow_;
+ bool has_pending_error_ = false;
+ bool stack_overflow_ = false;
bool unidentifiable_error_ = false;
MessageDetails error_details_;
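
The MessageDetails rewrite above replaces the single tagged argument with a fixed-size array of tagged-union arguments plus a computed argument count. A minimal standalone sketch of the same pattern, using made-up argument types (illustrative only, not V8's actual types):

    #include <cstddef>

    struct Argument {
      enum Type { kNone, kCString, kInt };
      Type type;
      union {
        const char* c_string;
        int number;
      };
      constexpr Argument() : type(kNone), c_string(nullptr) {}
      explicit constexpr Argument(const char* s)
          : type(s == nullptr ? kNone : kCString), c_string(s) {}
      explicit constexpr Argument(int n) : type(kInt), number(n) {}
    };

    struct Details {
      static constexpr int kMaxArgumentCount = 2;
      Argument args[kMaxArgumentCount];

      // Arguments are filled front to back; the count is the index of the
      // first kNone slot, matching MessageDetails::ArgCount() above.
      int ArgCount() const {
        int argc = 0;
        for (int i = 0; i < kMaxArgumentCount; i++) {
          if (args[i].type == Argument::kNone) break;
          argc++;
        }
        return argc;
      }
    };
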
diff --git a/chromium/v8/src/parsing/preparse-data.cc b/chromium/v8/src/parsing/preparse-data.cc
index 1643c6ba1a6..f368a11f9ab 100644
--- a/chromium/v8/src/parsing/preparse-data.cc
+++ b/chromium/v8/src/parsing/preparse-data.cc
@@ -666,12 +666,13 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
scope->AsDeclarationScope()->RecordNeedsPrivateNameContextChainRecalc();
}
if (ShouldSaveClassVariableIndexField::decode(scope_data_flags)) {
- Variable* var;
- // An anonymous class whose class variable needs to be saved do not
+ Variable* var = scope->AsClassScope()->class_variable();
+ // An anonymous class whose class variable needs to be saved might not
// have the class variable created during reparse since we skip parsing
// the inner scopes that contain potential access to static private
// methods. So create it now.
- if (scope->AsClassScope()->is_anonymous_class()) {
+ if (var == nullptr) {
+ DCHECK(scope->AsClassScope()->is_anonymous_class());
var = scope->AsClassScope()->DeclareClassVariable(
ast_value_factory, nullptr, kNoSourcePosition);
AstNodeFactory factory(ast_value_factory, zone);
@@ -679,9 +680,6 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
factory.NewVariableDeclaration(kNoSourcePosition);
scope->declarations()->Add(declaration);
declaration->set_var(var);
- } else {
- var = scope->AsClassScope()->class_variable();
- DCHECK_NOT_NULL(var);
}
var->set_is_used();
var->ForceContextAllocation();
diff --git a/chromium/v8/src/parsing/preparser.h b/chromium/v8/src/parsing/preparser.h
index 1949e7f8a7e..746802a9aac 100644
--- a/chromium/v8/src/parsing/preparser.h
+++ b/chromium/v8/src/parsing/preparser.h
@@ -537,7 +537,7 @@ class PreParserFactory {
PreParserExpression NewTheHoleLiteral() {
return PreParserExpression::Default();
}
- PreParserExpression NewRegExpLiteral(const PreParserIdentifier& js_pattern,
+ PreParserExpression NewRegExpLiteral(const AstRawString* js_pattern,
int js_flags, int pos) {
return PreParserExpression::Default();
}
@@ -1455,12 +1455,9 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default();
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
+ V8_INLINE const AstRawString* PreParserIdentifierToAstRawString(
+ const PreParserIdentifier& x) {
+ return x.string_;
}
V8_INLINE void ReportUnidentifiableError() {
@@ -1468,19 +1465,6 @@ class PreParser : public ParserBase<PreParser> {
scanner()->set_parser_error();
}
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message,
- const PreParserIdentifier& arg) {
- ReportMessageAt(source_location, message, arg.string_);
- }
-
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const PreParserIdentifier& arg) {
return arg.string_;
}
diff --git a/chromium/v8/src/parsing/scanner-character-streams.cc b/chromium/v8/src/parsing/scanner-character-streams.cc
index becc72c12d8..f090503c840 100644
--- a/chromium/v8/src/parsing/scanner-character-streams.cc
+++ b/chromium/v8/src/parsing/scanner-character-streams.cc
@@ -7,9 +7,11 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-primitive.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/handles/handles.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
@@ -101,7 +103,7 @@ class ExternalStringStream {
ExternalStringStream(ExternalString string, size_t start_offset,
size_t length)
: lock_(string),
- data_(string.GetChars() + start_offset),
+ data_(string.GetChars(GetPtrComprCageBase(string)) + start_offset),
length_(length) {}
ExternalStringStream(const ExternalStringStream& other) V8_NOEXCEPT
diff --git a/chromium/v8/src/parsing/scanner-character-streams.h b/chromium/v8/src/parsing/scanner-character-streams.h
index 09181356f0b..8665ea0b4bd 100644
--- a/chromium/v8/src/parsing/scanner-character-streams.h
+++ b/chromium/v8/src/parsing/scanner-character-streams.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h" // for v8::ScriptCompiler
+#include "include/v8-script.h" // for v8::ScriptCompiler
#include "src/common/globals.h"
namespace v8 {
diff --git a/chromium/v8/src/parsing/scanner.cc b/chromium/v8/src/parsing/scanner.cc
index b624694295c..cbfd3990202 100644
--- a/chromium/v8/src/parsing/scanner.cc
+++ b/chromium/v8/src/parsing/scanner.cc
@@ -978,9 +978,6 @@ bool Scanner::ScanRegExpPattern() {
// worrying whether the following characters are part of the escape
// or not, since any '/', '\\' or '[' is guaranteed to not be part
// of the escape sequence.
-
- // TODO(896): At some point, parse RegExps more thoroughly to capture
- // octal esacpes in strict mode.
} else { // Unescaped character.
if (c0_ == '[') in_character_class = true;
if (c0_ == ']') in_character_class = false;
@@ -993,22 +990,21 @@ bool Scanner::ScanRegExpPattern() {
return true;
}
-Maybe<int> Scanner::ScanRegExpFlags() {
+base::Optional<RegExpFlags> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
- // Scan regular expression flags.
- JSRegExp::Flags flags;
+ RegExpFlags flags;
while (IsIdentifierPart(c0_)) {
- base::Optional<JSRegExp::Flags> maybe_flag = JSRegExp::FlagFromChar(c0_);
- if (!maybe_flag.has_value()) return Nothing<int>();
- JSRegExp::Flags flag = *maybe_flag;
- if (flags & flag) return Nothing<int>();
+ base::Optional<RegExpFlag> maybe_flag = JSRegExp::FlagFromChar(c0_);
+ if (!maybe_flag.has_value()) return {};
+ RegExpFlag flag = maybe_flag.value();
+ if (flags & flag) return {};
Advance();
flags |= flag;
}
next().location.end_pos = source_pos();
- return Just<int>(flags);
+ return flags;
}
const AstRawString* Scanner::CurrentSymbol(
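
ScanRegExpFlags() above now accumulates RegExpFlags bits and signals failure through an empty base::Optional when it meets an unknown or repeated flag. A minimal standalone sketch of that loop using std::optional (illustrative only; the flag letters and bit values here are assumptions of the sketch, not V8's encoding):

    #include <optional>
    #include <string>

    std::optional<int> ScanFlags(const std::string& flags) {
      int result = 0;
      for (char c : flags) {
        int bit;
        switch (c) {
          case 'g': bit = 1 << 0; break;
          case 'i': bit = 1 << 1; break;
          case 'm': bit = 1 << 2; break;
          default: return std::nullopt;         // unknown flag character
        }
        if (result & bit) return std::nullopt;  // flag given twice
        result |= bit;
      }
      return result;
    }

For example, ScanFlags("gi") yields a value, while ScanFlags("gg") and ScanFlags("x") both yield an empty optional, the same shape of result the rewritten scanner code returns.
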
diff --git a/chromium/v8/src/parsing/scanner.h b/chromium/v8/src/parsing/scanner.h
index 3474f7270d5..7ab44d5b201 100644
--- a/chromium/v8/src/parsing/scanner.h
+++ b/chromium/v8/src/parsing/scanner.h
@@ -10,7 +10,6 @@
#include <algorithm>
#include <memory>
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
@@ -18,6 +17,7 @@
#include "src/parsing/literal-buffer.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/char-predicates.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"
@@ -399,7 +399,7 @@ class V8_EXPORT_PRIVATE Scanner {
// Returns true if a pattern is scanned.
bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
- Maybe<int> ScanRegExpFlags();
+ base::Optional<RegExpFlags> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateContinuation() {
diff --git a/chromium/v8/src/profiler/allocation-tracker.cc b/chromium/v8/src/profiler/allocation-tracker.cc
index 9bba48521ca..f228d79ad63 100644
--- a/chromium/v8/src/profiler/allocation-tracker.cc
+++ b/chromium/v8/src/profiler/allocation-tracker.cc
@@ -5,7 +5,7 @@
#include "src/profiler/allocation-tracker.h"
#include "src/execution/frames-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/objects/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
diff --git a/chromium/v8/src/profiler/allocation-tracker.h b/chromium/v8/src/profiler/allocation-tracker.h
index 36b9e91883e..a33f08c0d0e 100644
--- a/chromium/v8/src/profiler/allocation-tracker.h
+++ b/chromium/v8/src/profiler/allocation-tracker.h
@@ -8,7 +8,9 @@
#include <map>
#include <vector>
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
+#include "include/v8-unwinder.h"
#include "src/base/hashmap.h"
#include "src/base/vector.h"
#include "src/handles/handles.h"
diff --git a/chromium/v8/src/profiler/cpu-profiler.cc b/chromium/v8/src/profiler/cpu-profiler.cc
index a59c9359eba..829f2ab67fc 100644
--- a/chromium/v8/src/profiler/cpu-profiler.cc
+++ b/chromium/v8/src/profiler/cpu-profiler.cc
@@ -7,6 +7,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-locker.h"
#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
@@ -39,9 +40,10 @@ class CpuSampler : public sampler::Sampler {
void SampleStack(const v8::RegisterState& regs) override {
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
- perThreadData_->thread_id()) ||
- perThreadData_->thread_state() != nullptr)) {
+ if (v8::Locker::WasEverUsed() &&
+ (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr)) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kIsolateNotLocked);
return;
@@ -361,6 +363,16 @@ void ProfilerCodeObserver::CodeEventHandler(
CodeEventHandlerInternal(evt_rec);
}
+size_t ProfilerCodeObserver::GetEstimatedMemoryUsage() const {
+  // To avoid a race condition in the code map, limit this computation to
+  // kEagerLogging mode for now.
+ if (!processor_) {
+ return sizeof(*this) + code_map_.GetEstimatedMemoryUsage() +
+ code_entries_.strings().GetStringSize();
+ }
+ return 0;
+}
+
void ProfilerCodeObserver::CodeEventHandlerInternal(
const CodeEventsContainer& evt_rec) {
CodeEventsContainer record = evt_rec;
diff --git a/chromium/v8/src/profiler/cpu-profiler.h b/chromium/v8/src/profiler/cpu-profiler.h
index b465f827c96..ea14d6c6187 100644
--- a/chromium/v8/src/profiler/cpu-profiler.h
+++ b/chromium/v8/src/profiler/cpu-profiler.h
@@ -268,6 +268,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
CodeEntryStorage* code_entries() { return &code_entries_; }
CodeMap* code_map() { return &code_map_; }
WeakCodeRegistry* weak_code_registry() { return &weak_code_registry_; }
+ size_t GetEstimatedMemoryUsage() const;
void ClearCodeMap();
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc
index 231595dae7a..13c587dd760 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.cc
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc
@@ -604,7 +604,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
} else if (object.IsJSRegExp()) {
JSRegExp re = JSRegExp::cast(object);
- return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.Pattern()));
+ return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.source()));
} else if (object.IsJSObject()) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
@@ -718,11 +718,12 @@ int V8HeapExplorer::EstimateObjectsCount() {
return objects_count;
}
-class IndexedReferencesExtractor : public ObjectVisitor {
+class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject parent_obj,
HeapEntry* parent)
- : generator_(generator),
+ : ObjectVisitorWithCageBases(generator->isolate()),
+ generator_(generator),
parent_obj_(parent_obj),
parent_start_(parent_obj_.RawMaybeWeakField(0)),
parent_end_(parent_obj_.RawMaybeWeakField(parent_obj_.Size())),
@@ -733,10 +734,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void VisitMapPointer(HeapObject object) override {
- // TODO(v8:11880): support external code space (here object could be Code,
- // so the V8 heap cage_base must be used here).
- PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- VisitSlotImpl(cage_base, object.map_slot());
+ VisitSlotImpl(cage_base(), object.map_slot());
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
@@ -744,17 +742,14 @@ class IndexedReferencesExtractor : public ObjectVisitor {
// all the slots must point inside the object.
CHECK_LE(parent_start_, start);
CHECK_LE(end, parent_end_);
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (MaybeObjectSlot slot = start; slot < end; ++slot) {
- VisitSlotImpl(cage_base, slot);
+ VisitSlotImpl(cage_base(), slot);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- VisitSlotImpl(code_cage_base, slot);
+ VisitSlotImpl(code_cage_base(), slot);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
@@ -763,7 +758,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VisitHeapObjectImpl(rinfo->target_object(), -1);
+ HeapObject object = rinfo->target_object_no_host(cage_base());
+ if (host.IsWeakObject(object)) {
+ generator_->SetWeakReference(parent_, next_index_++, object, {});
+ } else {
+ VisitHeapObjectImpl(rinfo->target_object(), -1);
+ }
}
private:
@@ -774,8 +774,11 @@ class IndexedReferencesExtractor : public ObjectVisitor {
generator_->visited_fields_[field_index] = false;
} else {
HeapObject heap_object;
- if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ auto loaded_value = slot.load(cage_base);
+ if (loaded_value.GetHeapObjectIfStrong(&heap_object)) {
VisitHeapObjectImpl(heap_object, field_index);
+ } else if (loaded_value.GetHeapObjectIfWeak(&heap_object)) {
+ generator_->SetWeakReference(parent_, next_index_++, heap_object, {});
}
}
}
@@ -1223,15 +1226,20 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
return;
}
- TagObject(code.deoptimization_data(), "(code deopt data)");
- SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
- Code::kDeoptimizationDataOffset);
if (code.kind() == CodeKind::BASELINE) {
+ TagObject(code.bytecode_or_interpreter_data(), "(interpreter data)");
+ SetInternalReference(entry, "interpreter_data",
+ code.bytecode_or_interpreter_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.bytecode_offset_table(), "(bytecode offset table)");
SetInternalReference(entry, "bytecode_offset_table",
code.bytecode_offset_table(),
Code::kPositionTableOffset);
} else {
+ TagObject(code.deoptimization_data(), "(code deopt data)");
+ SetInternalReference(entry, "deoptimization_data",
+ code.deoptimization_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.source_position_table(), "(source position table)");
SetInternalReference(entry, "source_position_table",
code.source_position_table(),
@@ -1415,7 +1423,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
if (!snapshot_->capture_numeric_value()) {
Representation r = details.representation();
if (r.IsSmi() || r.IsDouble()) break;
@@ -1431,7 +1439,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
nullptr, field_offset);
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
SetDataOrAccessorPropertyReference(
details.kind(), entry, descs.GetKey(i), descs.GetStrongValue(i));
break;
@@ -1781,7 +1789,8 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
- Object child_obj, int field_offset) {
+ Object child_obj,
+ base::Optional<int> field_offset) {
if (!IsEssentialObject(child_obj)) {
return;
}
@@ -1789,7 +1798,9 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(
HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
- MarkVisitedField(field_offset);
+ if (field_offset.has_value()) {
+ MarkVisitedField(*field_offset);
+ }
}
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
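
IndexedReferencesExtractor above now distinguishes strongly and weakly held slot values: strong targets are visited as before, while weak targets are recorded as weak edges without a field offset. A minimal standalone sketch of that dispatch with toy types (illustrative only; real slots pack the weak bit into a tagged pointer value):

    #include <iostream>

    struct SlotValue {
      const char* target = nullptr;  // nullptr stands in for a cleared slot
      bool is_weak = false;
    };

    void VisitStrong(const char* target) { std::cout << "strong -> " << target << "\n"; }
    void RecordWeakEdge(const char* target) { std::cout << "weak   -> " << target << "\n"; }

    void VisitSlot(const SlotValue& value) {
      if (value.target == nullptr) return;  // cleared: nothing to record
      if (!value.is_weak) {
        VisitStrong(value.target);          // strong reference: normal edge
      } else {
        RecordWeakEdge(value.target);       // weak reference: weak edge in the snapshot
      }
    }

    int main() {
      VisitSlot({"Map", false});
      VisitSlot({"optimized Code", true});
      return 0;
    }
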
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.h b/chromium/v8/src/profiler/heap-snapshot-generator.h
index 2ab13a99bf3..682a28773ca 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.h
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.h
@@ -13,6 +13,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
+#include "src/execution/isolate.h"
#include "src/objects/fixed-array.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
@@ -349,6 +350,8 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
V8HeapExplorer(const V8HeapExplorer&) = delete;
V8HeapExplorer& operator=(const V8HeapExplorer&) = delete;
+ V8_INLINE Isolate* isolate() { return Isolate::FromHeap(heap_); }
+
HeapEntry* AllocateEntry(HeapThing ptr) override;
HeapEntry* AllocateEntry(Smi smi) override;
int EstimateObjectsCount();
@@ -436,7 +439,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
Object child_obj, int field_offset);
void SetWeakReference(HeapEntry* parent_entry, int index, Object child_obj,
- int field_offset);
+ base::Optional<int> field_offset);
void SetPropertyReference(HeapEntry* parent_entry, Name reference_name,
Object child,
const char* name_format_string = nullptr,
diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc
index 06aefe95055..34a15159a37 100644
--- a/chromium/v8/src/profiler/profile-generator.cc
+++ b/chromium/v8/src/profiler/profile-generator.cc
@@ -64,6 +64,11 @@ int SourcePositionTable::GetInliningId(int pc_offset) const {
return it->inlining_id;
}
+size_t SourcePositionTable::Size() const {
+ return sizeof(*this) + pc_offsets_to_lines_.capacity() *
+ sizeof(decltype(pc_offsets_to_lines_)::value_type);
+}
+
void SourcePositionTable::print() const {
base::OS::Print(" - source position table at %p\n", this);
for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
@@ -207,6 +212,37 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
}
}
+size_t CodeEntry::EstimatedSize() const {
+ size_t estimated_size = 0;
+ if (rare_data_) {
+ estimated_size += sizeof(rare_data_.get());
+
+ for (const auto& inline_entry : rare_data_->inline_entries_) {
+ estimated_size += inline_entry->EstimatedSize();
+ }
+ estimated_size += rare_data_->inline_entries_.size() *
+ sizeof(decltype(rare_data_->inline_entries_)::value_type);
+
+ for (const auto& inline_stack_pair : rare_data_->inline_stacks_) {
+ estimated_size += inline_stack_pair.second.size() *
+ sizeof(decltype(inline_stack_pair.second)::value_type);
+ }
+ estimated_size +=
+ rare_data_->inline_stacks_.size() *
+ (sizeof(decltype(rare_data_->inline_stacks_)::key_type) +
+ sizeof(decltype(rare_data_->inline_stacks_)::value_type));
+
+ estimated_size +=
+ rare_data_->deopt_inlined_frames_.capacity() *
+ sizeof(decltype(rare_data_->deopt_inlined_frames_)::value_type);
+ }
+
+ if (line_info_) {
+ estimated_size += line_info_.get()->Size();
+ }
+ return sizeof(*this) + estimated_size;
+}
+
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
DCHECK(has_deopt_info());
@@ -423,9 +459,7 @@ class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
- void AfterAllChildrenTraversed(ProfileNode* node) {
- delete node;
- }
+ void AfterAllChildrenTraversed(ProfileNode* node) { delete node; }
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
@@ -845,6 +879,15 @@ void CodeMap::Print() {
}
}
+size_t CodeMap::GetEstimatedMemoryUsage() const {
+ size_t map_size = 0;
+ for (const auto& pair : code_map_) {
+ map_size += sizeof(pair.first) + sizeof(pair.second) +
+ pair.second.entry->EstimatedSize();
+ }
+ return sizeof(*this) + map_size;
+}
+
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
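
CodeEntry::EstimatedSize() and CodeMap::GetEstimatedMemoryUsage() above follow one estimation pattern: the object's own footprint plus container capacity times element size, summed per map entry. A minimal standalone sketch of that pattern (illustrative only; allocator overhead and node bookkeeping are deliberately ignored, as in the originals):

    #include <cstddef>
    #include <map>
    #include <memory>
    #include <vector>

    struct Entry {
      std::vector<int> lines;
      size_t EstimatedSize() const {
        // Own footprint plus the vector's reserved storage.
        return sizeof(*this) +
               lines.capacity() * sizeof(decltype(lines)::value_type);
      }
    };

    struct Table {
      std::map<int, std::unique_ptr<Entry>> entries;
      size_t EstimatedMemoryUsage() const {
        size_t size = 0;
        for (const auto& pair : entries) {
          // Key + mapped pointer + the pointed-to entry's own estimate.
          size += sizeof(pair.first) + sizeof(pair.second) +
                  pair.second->EstimatedSize();
        }
        return sizeof(*this) + size;
      }
    };
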
diff --git a/chromium/v8/src/profiler/profile-generator.h b/chromium/v8/src/profiler/profile-generator.h
index 3e8d073f630..bb0adbfe3b2 100644
--- a/chromium/v8/src/profiler/profile-generator.h
+++ b/chromium/v8/src/profiler/profile-generator.h
@@ -38,6 +38,7 @@ class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
int GetSourceLineNumber(int pc_offset) const;
int GetInliningId(int pc_offset) const;
+ size_t Size() const;
void print() const;
private:
@@ -98,6 +99,7 @@ class CodeEntry {
void set_deopt_info(const char* deopt_reason, int deopt_id,
std::vector<CpuProfileDeoptFrame> inlined_frames);
+ size_t EstimatedSize() const;
CpuProfileDeoptInfo GetDeoptInfo();
bool has_deopt_info() const {
return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
@@ -491,6 +493,8 @@ class V8_EXPORT_PRIVATE CodeMap {
void Print();
size_t size() const { return code_map_.size(); }
+ size_t GetEstimatedMemoryUsage() const;
+
CodeEntryStorage& code_entries() { return code_entries_; }
void Clear();
diff --git a/chromium/v8/src/profiler/strings-storage.cc b/chromium/v8/src/profiler/strings-storage.cc
index 054aa3f80e5..37197a5918b 100644
--- a/chromium/v8/src/profiler/strings-storage.cc
+++ b/chromium/v8/src/profiler/strings-storage.cc
@@ -36,6 +36,7 @@ const char* StringsStorage::GetCopy(const char* src) {
base::StrNCpy(dst, src, len);
dst[len] = '\0';
entry->key = dst.begin();
+ string_size_ += len;
}
entry->value =
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) + 1);
@@ -56,6 +57,7 @@ const char* StringsStorage::AddOrDisposeString(char* str, int len) {
if (entry->value == nullptr) {
// New entry added.
entry->key = str;
+ string_size_ += len;
} else {
DeleteArray(str);
}
@@ -156,6 +158,7 @@ bool StringsStorage::Release(const char* str) {
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) - 1);
if (entry->value == 0) {
+ string_size_ -= len;
names_.Remove(const_cast<char*>(str), hash);
DeleteArray(str);
}
@@ -166,6 +169,11 @@ size_t StringsStorage::GetStringCountForTesting() const {
return names_.occupancy();
}
+size_t StringsStorage::GetStringSize() {
+ base::MutexGuard guard(&mutex_);
+ return string_size_;
+}
+
base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = ComputeStringHash(str, len);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
diff --git a/chromium/v8/src/profiler/strings-storage.h b/chromium/v8/src/profiler/strings-storage.h
index 7e39c0ee33b..1d4c2e44d2a 100644
--- a/chromium/v8/src/profiler/strings-storage.h
+++ b/chromium/v8/src/profiler/strings-storage.h
@@ -47,6 +47,9 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Returns the number of strings in the store.
size_t GetStringCountForTesting() const;
+  // Returns the total size, in bytes, of the strings in the store.
+ size_t GetStringSize();
+
// Returns true if the strings table is empty.
bool empty() const { return names_.occupancy() == 0; }
@@ -62,6 +65,7 @@ class V8_EXPORT_PRIVATE StringsStorage {
base::CustomMatcherHashMap names_;
base::Mutex mutex_;
+ size_t string_size_ = 0;
};
} // namespace internal
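
The string_size_ counter added to StringsStorage above keeps a running total of the bytes held by the reference-counted string table: a string's length is added when it is first interned and subtracted when its last reference is released, and the total is read under the mutex. A minimal standalone sketch of that accounting (illustrative only, using std::string instead of V8's hash map of char*):

    #include <cstddef>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class InternTable {
     public:
      const std::string* GetCopy(const std::string& s) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = refcounts_.find(s);
        if (it == refcounts_.end()) {
          it = refcounts_.emplace(s, 0).first;
          string_size_ += s.size();  // first copy: account for its length
        }
        it->second++;
        return &it->first;
      }

      void Release(const std::string& s) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = refcounts_.find(s);
        if (it == refcounts_.end()) return;
        if (--it->second == 0) {
          string_size_ -= s.size();  // last reference gone: un-account it
          refcounts_.erase(it);
        }
      }

      size_t GetStringSize() {
        std::lock_guard<std::mutex> guard(mutex_);
        return string_size_;
      }

     private:
      std::mutex mutex_;
      std::unordered_map<std::string, int> refcounts_;
      size_t string_size_ = 0;
    };
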
diff --git a/chromium/v8/src/profiler/tick-sample.cc b/chromium/v8/src/profiler/tick-sample.cc
index 253b80d19e2..daef48eb263 100644
--- a/chromium/v8/src/profiler/tick-sample.cc
+++ b/chromium/v8/src/profiler/tick-sample.cc
@@ -105,7 +105,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->sp());
state->fp = reinterpret_cast<void*>(simulator->fp());
state->lr = reinterpret_cast<void*>(simulator->lr());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}
diff --git a/chromium/v8/src/profiler/tick-sample.h b/chromium/v8/src/profiler/tick-sample.h
index 1bfcb7d0971..4402bdc2724 100644
--- a/chromium/v8/src/profiler/tick-sample.h
+++ b/chromium/v8/src/profiler/tick-sample.h
@@ -5,7 +5,7 @@
#ifndef V8_PROFILER_TICK_SAMPLE_H_
#define V8_PROFILER_TICK_SAMPLE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
diff --git a/chromium/v8/src/profiler/weak-code-registry.cc b/chromium/v8/src/profiler/weak-code-registry.cc
index 2918e1ca827..961164d7933 100644
--- a/chromium/v8/src/profiler/weak-code-registry.cc
+++ b/chromium/v8/src/profiler/weak-code-registry.cc
@@ -4,7 +4,8 @@
#include "src/profiler/weak-code-registry.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
+#include "src/objects/code-inl.h"
#include "src/objects/instance-type-inl.h"
namespace v8 {
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 6c90e008173..f21ee023da9 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -6,15 +6,13 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#include "src/codegen/assembler-inl.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -40,14 +38,12 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[56] Address regexp (address of the JSRegExp object; unused in
+ * - fp[52] Address regexp (address of the JSRegExp object; unused in
* native code, passed to match signature of
* the interpreter)
- * - fp[52] Isolate* isolate (address of the current isolate)
- * - fp[48] direct_call (if 1, direct call from JavaScript code,
+ * - fp[48] Isolate* isolate (address of the current isolate)
+ * - fp[44] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[44] stack_area_base (high end of the memory area to use as
- * backtracking stack).
* - fp[40] capture array size (may fit multiple sets of matches)
* - fp[36] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
@@ -84,7 +80,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -100,8 +95,10 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -110,15 +107,12 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -338,7 +332,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -619,6 +613,42 @@ void RegExpMacroAssemblerARM::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerARM::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ ldr(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerARM::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ str(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerARM::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ ldr(scratch, MemOperand(scratch));
+ __ sub(scratch, stack_pointer, scratch);
+ __ str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerARM::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ ldr(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ ldr(scratch, MemOperand(scratch));
+ __ add(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label return_r0;
@@ -630,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
  // Tell the system that we have a stack frame. Because the type is MANUAL,
  // no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -654,34 +684,47 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ push(r0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(r0); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r0); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r8);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r1);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ sub(r0, sp, r0, SetCC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ b(ls, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(r0, Operand(num_registers_ * kPointerSize));
+ __ b(hs, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r0, Operand(EXCEPTION));
+ __ jmp(&return_r0);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState();
- __ cmp(r0, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState();
+ __ cmp(r0, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ b(ne, &return_r0);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kPointerSize);
@@ -703,18 +746,21 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand::Zero());
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(r1, Operand::Zero());
+ __ b(ne, &load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'), LeaveCC, eq);
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -735,9 +781,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -804,6 +847,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Prepare r0 to initialize registers with its value in the next run.
__ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r2);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r4: capture start index
@@ -834,6 +881,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
__ bind(&return_r0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r2);
+
  // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -851,12 +902,16 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r1);
+
CallCheckStackGuardState();
__ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
@@ -867,17 +922,18 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r1);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments);
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -984,14 +1040,24 @@ void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
__ ldr(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r1, Operand(ref));
+ __ ldr(r1, MemOperand(r1));
+ __ sub(r0, backtrack_stackpointer(), r1);
+ __ str(r0, register_location(reg));
+}
void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r0, Operand(ref));
+ __ ldr(r0, MemOperand(r0));
__ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
+ __ add(backtrack_stackpointer(), backtrack_stackpointer(), r0);
}
-
void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmp(current_input_offset(), Operand(-by * char_size()));
@@ -1037,14 +1103,6 @@ void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
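
The PushRegExpBasePointer/PopRegExpBasePointer helpers above save the backtrack stack pointer as an offset from the regexp stack's top address rather than as a raw address, so the saved value stays meaningful even if the stack memory is reallocated (and therefore moves) while the generated code runs. A minimal standalone sketch of that rebasing idea (illustrative only, not the generated assembly):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct BacktrackStack {
      std::vector<uint8_t> memory = std::vector<uint8_t>(256);
      uint8_t* top() { return memory.data() + memory.size(); }
    };

    int main() {
      BacktrackStack stack;
      uint8_t* sp = stack.top() - 16;            // current stack pointer
      std::ptrdiff_t saved = sp - stack.top();   // save as an offset from the top

      stack.memory.resize(1024);                 // stack grows: storage may move

      uint8_t* restored = stack.top() + saved;   // rebase against the new top
      std::printf("offset from top is still %td\n", stack.top() - restored);
      return 0;
    }
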
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index a02a4dc2af5..478ed292ae9 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -5,8 +5,6 @@
#ifndef V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#include "src/base/strings.h"
-#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -93,15 +91,13 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
@@ -115,8 +111,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -129,7 +131,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState();
@@ -138,27 +139,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r6; }
+ static constexpr Register current_input_offset() { return r6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
+ static constexpr Register current_character() { return r7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
+ static constexpr Register end_of_input_address() { return r10; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r8; }
+ static constexpr Register backtrack_stackpointer() { return r8; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
+ static constexpr Register code_pointer() { return r5; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -178,19 +179,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 6edb1335760..b0d55d4fe04 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -66,14 +66,12 @@ namespace internal {
* ^^^^^^^^^ fp ^^^^^^^^^
* - fp[-8] direct_call 1 => Direct call from JavaScript code.
* 0 => Call through the runtime system.
- * - fp[-16] stack_base High end of the memory area to use as
- * the backtracking stack.
- * - fp[-24] output_size Output may fit multiple sets of matches.
- * - fp[-32] input Handle containing the input string.
- * - fp[-40] success_counter
+ * - fp[-16] output_size Output may fit multiple sets of matches.
+ * - fp[-24] input Handle containing the input string.
+ * - fp[-32] success_counter
* ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
- * - fp[-44] register N Capture registers initialized with
- * - fp[-48] register N + 1 non_position_value.
+ * - fp[-40] register N Capture registers initialized with
+ * - fp[-44] register N + 1 non_position_value.
* ... The first kNumCachedRegisters (N) registers
* ... are cached in x0 to x7.
* ... Only positions must be stored in the first
@@ -95,7 +93,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -111,8 +108,10 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -121,8 +120,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
success_label_(),
backtrack_label_(),
exit_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
@@ -134,7 +131,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
}
RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -185,7 +181,6 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -195,7 +190,7 @@ void RegExpMacroAssemblerARM64::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- UseScratchRegisterScope temps(masm_);
+ UseScratchRegisterScope temps(masm_.get());
Register scratch = temps.AcquireW();
__ Ldr(scratch, MemOperand(frame_pointer(), kBacktrackCount));
__ Add(scratch, scratch, 1);
@@ -426,7 +421,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Mov(x3, ExternalReference::isolate_address(isolate()));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -700,6 +695,42 @@ void RegExpMacroAssemblerARM64::Fail() {
__ B(&exit_label_);
}
+void RegExpMacroAssemblerARM64::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ Mov(dst, ref);
+ __ Ldr(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerARM64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ Mov(scratch, ref);
+ __ Str(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerARM64::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Mov(scratch, ref);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Sub(scratch, stack_pointer, scratch);
+ __ Str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerARM64::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ldr(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ Mov(scratch, ref);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Add(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Label return_w0;
@@ -716,15 +747,14 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// x3: byte* input_end
// x4: int* output array
// x5: int output array size
- // x6: Address stack_base
- // x7: int direct_call
-
- // sp[8]: address of the current isolate
- // sp[0]: secondary link/return address used by native call
+ // x6: int direct_call
+ // x7: Isolate* isolate
+ //
+ // sp[0]: secondary link/return address used by native call
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Push registers on the stack, only push the argument registers that we need.
CPURegList argument_registers(x0, x5, x6, x7);
@@ -745,52 +775,63 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Mov(input_end(), x3);
__ Mov(output_array(), x4);
- // Set the number of registers we will need to allocate, that is:
- // - kSuccessCounter / success_counter (X register)
- // - kBacktrackCount (X register)
- // - (num_registers_ - kNumCachedRegisters) (W registers)
- int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
- // Do not allocate registers on the stack if they can all be cached.
- if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
- // Make room for the success_counter and kBacktrackCount. Each X (64-bit)
- // register is equivalent to two W (32-bit) registers.
- num_wreg_to_allocate += 2 + 2;
-
// Make sure the stack alignment will be respected.
- int alignment = masm_->ActivationFrameAlignment();
+ const int alignment = masm_->ActivationFrameAlignment();
DCHECK_EQ(alignment % 16, 0);
- int align_mask = (alignment / kWRegSize) - 1;
- num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+ const int align_mask = (alignment / kWRegSize) - 1;
- // Check if we have space on the stack.
- Label stack_limit_hit;
- Label stack_ok;
+ // Make room for stack locals.
+ static constexpr int kWRegPerXReg = kXRegSize / kWRegSize;
+ DCHECK_EQ(kNumberOfStackLocals * kWRegPerXReg,
+ ((kNumberOfStackLocals * kWRegPerXReg) + align_mask) & ~align_mask);
+ __ Claim(kNumberOfStackLocals * kWRegPerXReg);
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ Mov(x10, stack_limit);
- __ Ldr(x10, MemOperand(x10));
- __ Subs(x10, sp, x10);
-
- // Handle it if the stack pointer is already below the stack limit.
- __ B(ls, &stack_limit_hit);
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == x23);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
- __ B(hs, &stack_ok);
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), x11);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
+ // Set the number of registers we will need to allocate, that is:
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ const int num_stack_registers =
+ std::max(0, num_registers_ - kNumCachedRegisters);
+ const int num_wreg_to_allocate =
+ (num_stack_registers + align_mask) & ~align_mask;
+
+ {
+ // Check if we have space on the stack.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, sp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
- __ Bind(&stack_limit_hit);
- CallCheckStackGuardState(x10);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Cbnz(w0, &return_w0);
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
- __ Bind(&stack_ok);
+ __ Bind(&stack_ok);
+ }
// Allocate space on stack.
__ Claim(num_wreg_to_allocate, kWRegSize);
@@ -823,26 +864,27 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Initialize code pointer register.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Cbnz(start_offset(), &load_char_start_regexp);
- __ Mov(current_character(), '\n');
- __ B(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ }
- // Global regexp restarts matching here.
- __ Bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) {
ClearRegisters(0, num_saved_registers_ - 1);
}
- // Initialize backtrack stack pointer.
- __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
-
- // Execute
+ // Execute.
__ B(&start_label_);
if (backtrack_label_.is_linked()) {
@@ -992,6 +1034,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Update output size on the frame before we restart matching.
__ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), x11);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
__ Cmp(current_input_offset(), first_capture_start);
@@ -1014,7 +1060,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
}
if (exit_label_.is_linked()) {
- // Exit and return w0
+ // Exit and return w0.
__ Bind(&exit_label_);
if (global()) {
__ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
@@ -1022,8 +1068,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
}
__ Bind(&return_w0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), x11);
- // Set stack pointer back to first register to retain
+ // Set stack pointer back to first register to retain.
__ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
@@ -1040,6 +1089,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
__ Bind(&check_preempt_label_);
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), x10);
+
SaveLinkRegister();
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
@@ -1049,26 +1101,30 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Cbnz(w0, &return_w0);
// Reset the cached registers.
__ PopCPURegList(cached_registers);
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
RestoreLinkRegister();
__ Ret();
}
if (stack_overflow_label_.is_linked()) {
__ Bind(&stack_overflow_label_);
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), x10);
+
SaveLinkRegister();
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
- __ Add(x1, frame_pointer(), kStackBase);
- __ Mov(x0, backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, 3);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (sp <- fp)
- // so we don't need to drop the link register from it before exiting.
+ // Call GrowStack(isolate)
+ static constexpr int kNumArguments = 1;
+ __ Mov(x0, ExternalReference::isolate_address(isolate()));
+ __ CallCFunction(ExternalReference::re_grow_stack(isolate()),
+ kNumArguments);
+ // If return nullptr, we have failed to grow the stack, and must exit with
+ // a stack-overflow exception. Returning from the regexp code restores the
+ // stack (sp <- fp) so we don't need to drop the link register from it
+ // before exiting.
__ Cbz(w0, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ Mov(backtrack_stackpointer(), x0);
@@ -1192,14 +1248,29 @@ void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
}
}
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Mov(x10, ref);
+ __ Ldr(x10, MemOperand(x10));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (FLAG_debug_code) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
Register read_from = GetRegister(reg, w10);
- __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Mov(x11, ref);
+ __ Ldr(x11, MemOperand(x11));
__ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
}
-
void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Cmp(current_input_offset(), -by * char_size());
@@ -1301,19 +1372,6 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
- __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
- __ Sub(x10, backtrack_stackpointer(), x10);
- if (FLAG_debug_code) {
- __ Cmp(x10, Operand(w10, SXTW));
- // The stack offset needs to fit in a W register.
- __ Check(eq, AbortReason::kOffsetOutOfRange);
- }
- StoreRegister(reg, w10);
-}
-
-
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
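
The new PushRegExpBasePointer/PopRegExpBasePointer and Write/ReadStackPointerFromRegister helpers above all store the backtrack stack pointer as an offset from the current top of the regexp stack memory rather than as a raw address. A sketch of that idea, in plain C++ with illustrative names (the arm64 code computes stack_pointer minus top, while the ia32 variant further below uses top minus stack_pointer; the sketch follows the arm64 direction):

#include <cstdint>

struct RegExpStackSketch {
  uintptr_t memory_top;     // cf. address_of_regexp_stack_memory_top_address
  uintptr_t stack_pointer;  // cf. address_of_regexp_stack_stack_pointer
};

// On entry to generated code: remember where we started, relative to the top,
// so the saved value stays valid even if the backing memory is reallocated
// (and thus moves) when the stack grows.
intptr_t SaveBasePointer(const RegExpStackSketch& s) {
  return static_cast<intptr_t>(s.stack_pointer - s.memory_top);
}

// On exit (or after the stack moved): turn the offset back into an address
// against the *current* top and publish it back to memory.
void RestoreBasePointer(RegExpStackSketch* s, intptr_t saved_offset) {
  s->stack_pointer = s->memory_top + saved_offset;
}
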
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 80931e3ca42..7b3d1b90e4c 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -102,26 +102,32 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// Callee-saved registers (x19-x28).
static const int kNumCalleeSavedRegisters = 10;
static const int kCalleeSavedRegisters = kReturnAddress + kSystemPointerSize;
- // Stack parameter placed by caller.
- // It is placed above the FP, LR and the callee-saved registers.
- static const int kIsolate =
- kCalleeSavedRegisters + kNumCalleeSavedRegisters * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = -kSystemPointerSize;
- static const int kStackBase = kDirectCall - kSystemPointerSize;
- static const int kOutputSize = kStackBase - kSystemPointerSize;
+ static const int kIsolate = -kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kOutputSize = kDirectCall - kSystemPointerSize;
static const int kInput = kOutputSize - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kSystemPointerSize;
static const int kBacktrackCount = kSuccessCounter - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+ // A padding slot to preserve alignment.
+ static const int kStackLocalPadding =
+ kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kNumberOfStackLocals = 4;
+
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
- static const int kFirstRegisterOnStack = kBacktrackCount - kWRegSize;
+ static const int kFirstRegisterOnStack = kStackLocalPadding - kWRegSize;
// A capture is a 64 bit value holding two position.
- static const int kFirstCaptureOnStack = kBacktrackCount - kXRegSize;
+ static const int kFirstCaptureOnStack = kStackLocalPadding - kXRegSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -152,43 +158,43 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// Register holding the current input position as negative offset from
// the end of the string.
- Register current_input_offset() { return w21; }
+ static constexpr Register current_input_offset() { return w21; }
// The register containing the current character after LoadCurrentCharacter.
- Register current_character() { return w22; }
+ static constexpr Register current_character() { return w22; }
// Register holding address of the end of the input string.
- Register input_end() { return x25; }
+ static constexpr Register input_end() { return x25; }
// Register holding address of the start of the input string.
- Register input_start() { return x26; }
+ static constexpr Register input_start() { return x26; }
// Register holding the offset from the start of the string where we should
// start matching.
- Register start_offset() { return w27; }
+ static constexpr Register start_offset() { return w27; }
// Pointer to the output array's first element.
- Register output_array() { return x28; }
+ static constexpr Register output_array() { return x28; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- Register backtrack_stackpointer() { return x23; }
+ static constexpr Register backtrack_stackpointer() { return x23; }
// Register holding pointer to the current code object.
- Register code_pointer() { return x20; }
+ static constexpr Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
- Register string_start_minus_one() { return w24; }
+ static constexpr Register string_start_minus_one() { return w24; }
// The top 32 bit of this register is used to store this value
// twice. This is used for clearing more than one register at a time.
- Register twice_non_position_value() { return x24; }
+ static constexpr Register twice_non_position_value() { return x24; }
// Byte size of chars in the string to match (decided by the Mode argument)
- int char_size() { return static_cast<int>(mode_); }
+ int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -254,19 +260,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/experimental/experimental-compiler.cc b/chromium/v8/src/regexp/experimental/experimental-compiler.cc
index 8b1d8415367..ae4abce7b5f 100644
--- a/chromium/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/chromium/v8/src/regexp/experimental/experimental-compiler.cc
@@ -16,12 +16,14 @@ namespace {
// TODO(mbid, v8:10765): Currently the experimental engine doesn't support
// UTF-16, but this shouldn't be too hard to implement.
constexpr base::uc32 kMaxSupportedCodepoint = 0xFFFFu;
+#ifdef DEBUG
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+#endif // DEBUG
class CanBeHandledVisitor final : private RegExpVisitor {
// Visitor to implement `ExperimentalRegExp::CanBeHandled`.
public:
- static bool Check(RegExpTree* tree, JSRegExp::Flags flags,
- int capture_count) {
+ static bool Check(RegExpTree* tree, RegExpFlags flags, int capture_count) {
if (!AreSuitableFlags(flags)) return false;
CanBeHandledVisitor visitor;
tree->Accept(&visitor, nullptr);
@@ -31,15 +33,15 @@ class CanBeHandledVisitor final : private RegExpVisitor {
private:
CanBeHandledVisitor() = default;
- static bool AreSuitableFlags(JSRegExp::Flags flags) {
+ static bool AreSuitableFlags(RegExpFlags flags) {
// TODO(mbid, v8:10765): We should be able to support all flags in the
// future.
- static constexpr JSRegExp::Flags kAllowedFlags =
- JSRegExp::kGlobal | JSRegExp::kSticky | JSRegExp::kMultiline |
- JSRegExp::kDotAll | JSRegExp::kLinear;
+ static constexpr RegExpFlags kAllowedFlags =
+ RegExpFlag::kGlobal | RegExpFlag::kSticky | RegExpFlag::kMultiline |
+ RegExpFlag::kDotAll | RegExpFlag::kLinear;
// We support Unicode iff kUnicode is among the supported flags.
STATIC_ASSERT(ExperimentalRegExp::kSupportsUnicode ==
- ((kAllowedFlags & JSRegExp::kUnicode) != 0));
+ IsUnicode(kAllowedFlags));
return (flags & ~kAllowedFlags) == 0;
}
@@ -173,7 +175,7 @@ class CanBeHandledVisitor final : private RegExpVisitor {
} // namespace
bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
int capture_count) {
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
@@ -294,11 +296,10 @@ class BytecodeAssembler {
class CompileVisitor : private RegExpVisitor {
public:
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags,
- Zone* zone) {
+ RegExpFlags flags, Zone* zone) {
CompileVisitor compiler(zone);
- if ((flags & JSRegExp::kSticky) == 0 && !tree->IsAnchoredAtStart()) {
+ if (!IsSticky(flags) && !tree->IsAnchoredAtStart()) {
// The match is not anchored, i.e. may start at any input position, so we
// emit a preamble corresponding to /.*?/. This skips an arbitrary
// prefix in the input non-greedily.
@@ -409,7 +410,7 @@ class CompileVisitor : private RegExpVisitor {
base::uc16 from_uc16 = static_cast<base::uc16>(from);
base::uc32 to = (*ranges)[i].to();
- DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == String::kMaxCodePoint);
+ DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == kMaxCodePoint);
base::uc16 to_uc16 =
static_cast<base::uc16>(std::min(to, kMaxSupportedCodepoint));
@@ -627,7 +628,7 @@ class CompileVisitor : private RegExpVisitor {
} // namespace
ZoneList<RegExpInstruction> ExperimentalRegExpCompiler::Compile(
- RegExpTree* tree, JSRegExp::Flags flags, Zone* zone) {
+ RegExpTree* tree, RegExpFlags flags, Zone* zone) {
return CompileVisitor::Compile(tree, flags, zone);
}
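
The hunks above move the experimental compiler from JSRegExp::Flags to the engine-internal RegExpFlags type: single-bit flag values combined with `|`, checked with small predicates like IsSticky()/IsUnicode(), and validated against a whitelist with `(flags & ~kAllowedFlags) == 0`. The following is a minimal sketch of that flag-set shape; the enumerator values and helper names are illustrative, not the real definitions from src/regexp/regexp-flags.h.

#include <cstdint>

enum class Flag : uint32_t {
  kGlobal = 1 << 0,
  kSticky = 1 << 1,
  kMultiline = 1 << 2,
  kDotAll = 1 << 3,
  kLinear = 1 << 4,
  kUnicode = 1 << 5,
};

using Flags = uint32_t;

constexpr Flags operator|(Flag a, Flag b) {
  return static_cast<Flags>(a) | static_cast<Flags>(b);
}
constexpr Flags operator|(Flags a, Flag b) {
  return a | static_cast<Flags>(b);
}

constexpr bool IsSticky(Flags f) {
  return f & static_cast<Flags>(Flag::kSticky);
}
constexpr bool IsUnicode(Flags f) {
  return f & static_cast<Flags>(Flag::kUnicode);
}

// Same shape as AreSuitableFlags() above: accept only a whitelisted subset.
constexpr Flags kAllowed =
    Flag::kGlobal | Flag::kSticky | Flag::kMultiline | Flag::kDotAll |
    Flag::kLinear;
constexpr bool AreSuitable(Flags f) { return (f & ~kAllowed) == 0; }
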
diff --git a/chromium/v8/src/regexp/experimental/experimental-compiler.h b/chromium/v8/src/regexp/experimental/experimental-compiler.h
index 87abcd39176..e6abf0557f4 100644
--- a/chromium/v8/src/regexp/experimental/experimental-compiler.h
+++ b/chromium/v8/src/regexp/experimental/experimental-compiler.h
@@ -7,6 +7,7 @@
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-list.h"
namespace v8 {
@@ -19,13 +20,13 @@ class ExperimentalRegExpCompiler final : public AllStatic {
// but see the definition.
// TODO(mbid,v8:10765): Currently more things are not handled, e.g. some
// quantifiers and unicode.
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
// Compile regexp into a bytecode program. The regexp must be handlable by
// the experimental engine; see`CanBeHandled`. The program is returned as a
// ZoneList backed by the same Zone that is used in the RegExpTree argument.
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags, Zone* zone);
+ RegExpFlags flags, Zone* zone);
};
} // namespace internal
diff --git a/chromium/v8/src/regexp/experimental/experimental-interpreter.h b/chromium/v8/src/regexp/experimental/experimental-interpreter.h
index d65299499b6..a21b01639a4 100644
--- a/chromium/v8/src/regexp/experimental/experimental-interpreter.h
+++ b/chromium/v8/src/regexp/experimental/experimental-interpreter.h
@@ -5,15 +5,14 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
-#include "src/base/vector.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/string.h"
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class String;
class Zone;
class ExperimentalRegExpInterpreter final : public AllStatic {
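
The interpreter header hunk above swaps three heavy includes for forward declarations of ByteArray and String, which works because the header only names those types in declarations. A tiny sketch of that include-hygiene pattern, with purely illustrative types:

// widget.h
class Gadget;                      // forward declaration is enough here...
struct Widget {
  int Process(const Gadget& g);    // ...because no member of Gadget is used.
};

// widget.cc would `#include "gadget.h"`: the full definition is only needed
// where the type is actually dereferenced.
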
diff --git a/chromium/v8/src/regexp/experimental/experimental.cc b/chromium/v8/src/regexp/experimental/experimental.cc
index bff2d7da664..1e745eaa31b 100644
--- a/chromium/v8/src/regexp/experimental/experimental.cc
+++ b/chromium/v8/src/regexp/experimental/experimental.cc
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine ||
FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
@@ -22,27 +22,27 @@ bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
}
void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> source,
- JSRegExp::Flags flags, int capture_count) {
+ Handle<String> source, RegExpFlags flags,
+ int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine);
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Initializing experimental regexp " << *source
<< std::endl;
}
- isolate->factory()->SetRegExpExperimentalData(re, source, flags,
- capture_count);
+ isolate->factory()->SetRegExpExperimentalData(
+ re, source, JSRegExp::AsJSRegExpFlags(flags), capture_count);
}
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
#endif
- return re->DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex) !=
- Smi::FromInt(JSRegExp::kUninitializedValue);
+ static constexpr bool kIsLatin1 = true;
+ return re->bytecode(kIsLatin1) != Smi::FromInt(JSRegExp::kUninitializedValue);
}
template <class T>
@@ -68,16 +68,15 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
Handle<JSRegExp> regexp) {
Zone zone(isolate->allocator(), ZONE_NAME);
- Handle<String> source(regexp->Pattern(), isolate);
- JSRegExp::Flags flags = regexp->GetFlags();
+ Handle<String> source(regexp->source(), isolate);
// Parse and compile the regexp source.
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, source);
DCHECK(!isolate->has_pending_exception());
- bool parse_success =
- RegExpParser::ParseRegExp(isolate, &zone, &reader, flags, &parse_result);
+ bool parse_success = RegExpParser::ParseRegExpFromHeapString(
+ isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->flags()),
+ &parse_result);
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
@@ -87,12 +86,13 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
return base::nullopt;
}
- ZoneList<RegExpInstruction> bytecode =
- ExperimentalRegExpCompiler::Compile(parse_result.tree, flags, &zone);
+ ZoneList<RegExpInstruction> bytecode = ExperimentalRegExpCompiler::Compile(
+ parse_result.tree, JSRegExp::AsRegExpFlags(regexp->flags()), &zone);
CompilationResult result;
result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
- result.capture_name_map = parse_result.capture_name_map;
+ result.capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, parse_result.named_captures);
return result;
}
@@ -100,12 +100,12 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
#endif
- Handle<String> source(re->Pattern(), isolate);
+ Handle<String> source(re->source(), isolate);
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
}
@@ -117,16 +117,8 @@ bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
return false;
}
- re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex,
- *compilation_result->bytecode);
- re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex,
- *compilation_result->bytecode);
-
- Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
- re->SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, ToCodeT(*trampoline));
- re->SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, ToCodeT(*trampoline));
-
- re->SetCaptureNameMap(compilation_result->capture_name_map);
+ re->set_bytecode_and_trampoline(isolate, compilation_result->bytecode);
+ re->set_capture_name_map(compilation_result->capture_name_map);
return true;
}
@@ -177,23 +169,22 @@ int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate,
DisallowGarbageCollection no_gc;
if (FLAG_trace_experimental_regexp_engine) {
- String source = String::cast(regexp.DataAt(JSRegExp::kSourceIndex));
- StdoutStream{} << "Executing experimental regexp " << source << std::endl;
+ StdoutStream{} << "Executing experimental regexp " << regexp.source()
+ << std::endl;
}
- ByteArray bytecode =
- ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex));
+ static constexpr bool kIsLatin1 = true;
+ ByteArray bytecode = ByteArray::cast(regexp.bytecode(kIsLatin1));
return ExecRawImpl(isolate, call_origin, bytecode, subject,
- regexp.CaptureCount(), output_registers,
+ regexp.capture_count(), output_registers,
output_register_count, subject_index);
}
int32_t ExperimentalRegExp::MatchForCallFromJs(
Address subject, int32_t start_position, Address input_start,
Address input_end, int* output_registers, int32_t output_register_count,
- Address backtrack_stack, RegExp::CallOrigin call_origin, Isolate* isolate,
- Address regexp) {
+ RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp) {
DCHECK(FLAG_enable_experimental_regexp_engine);
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
@@ -217,7 +208,7 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
int subject_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(regexp->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
regexp->JSRegExpVerify(isolate);
#endif
@@ -231,7 +222,7 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
subject = String::Flatten(isolate, subject);
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
int32_t* output_registers;
@@ -275,7 +266,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Experimental execution (oneshot) of regexp "
- << regexp->Pattern() << std::endl;
+ << regexp->source() << std::endl;
}
base::Optional<CompilationResult> compilation_result =
@@ -285,7 +276,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
DisallowGarbageCollection no_gc;
return ExecRawImpl(isolate, RegExp::kFromRuntime,
*compilation_result->bytecode, *subject,
- regexp->CaptureCount(), output_registers,
+ regexp->capture_count(), output_registers,
output_register_count, subject_index);
}
@@ -294,9 +285,9 @@ MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
int subject_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
- DCHECK_NE(regexp->TypeTag(), JSRegExp::NOT_COMPILED);
+ DCHECK_NE(regexp->type_tag(), JSRegExp::NOT_COMPILED);
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
int32_t* output_registers;
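
In the experimental.cc hunks above, the engine-internal RegExpFlags and the flags stored on the JSRegExp heap object are now distinct representations, bridged explicitly with JSRegExp::AsJSRegExpFlags() when storing and JSRegExp::AsRegExpFlags() when reading back for compilation. A simplified sketch of that boundary conversion; the struct definitions and conversion bodies below are illustrative only, not the real V8 declarations.

struct RegExpFlags { unsigned bits; };
struct JSRegExpFlags { unsigned bits; };

JSRegExpFlags AsJSRegExpFlags(RegExpFlags f) { return {f.bits}; }
RegExpFlags AsRegExpFlags(JSRegExpFlags f) { return {f.bits}; }

// Initialization path: parse-time flags are stored in the JSRegExp
// representation, e.g. JSRegExpFlags stored = AsJSRegExpFlags(parse_flags);
// Compilation path: the stored flags are read back into the internal type,
// e.g. RegExpFlags compile_flags = AsRegExpFlags(stored);
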
diff --git a/chromium/v8/src/regexp/experimental/experimental.h b/chromium/v8/src/regexp/experimental/experimental.h
index 1b44100cc88..cdc683e97e9 100644
--- a/chromium/v8/src/regexp/experimental/experimental.h
+++ b/chromium/v8/src/regexp/experimental/experimental.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp.h"
namespace v8 {
@@ -19,10 +20,10 @@ class ExperimentalRegExp final : public AllStatic {
// TODO(mbid, v8:10765): This walks the RegExpTree, but it could also be
// checked on the fly in the parser. Not done currently because walking the
// AST again is more flexible and less error prone (but less performant).
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
static void Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count);
static bool IsCompiled(Handle<JSRegExp> re, Isolate* isolate);
V8_WARN_UNUSED_RESULT
@@ -33,7 +34,6 @@ class ExperimentalRegExp final : public AllStatic {
Address input_start, Address input_end,
int* output_registers,
int32_t output_register_count,
- Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
static MaybeHandle<Object> Exec(
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6af1d02eed3..913f704b33e 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -40,8 +40,6 @@ namespace internal {
* - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (address of end of string)
@@ -74,7 +72,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate
* Address regexp);
@@ -88,8 +85,10 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -98,16 +97,12 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
- // Irregexp code clobbers ebx and spills/restores it at all boundaries.
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -339,7 +334,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ mov(Operand(esp, 0 * kSystemPointerSize), edx);
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference compare =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -655,6 +650,38 @@ void RegExpMacroAssemblerIA32::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerIA32::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, __ ExternalReferenceAsOperand(ref, dst));
+}
+
+void RegExpMacroAssemblerIA32::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(__ ExternalReferenceAsOperand(ref, scratch), src);
+}
+
+void RegExpMacroAssemblerIA32::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, __ ExternalReferenceAsOperand(ref, scratch));
+ __ sub(scratch, stack_pointer);
+ __ mov(Operand(ebp, kRegExpStackBasePointer), scratch);
+}
+
+void RegExpMacroAssemblerIA32::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ebp, kRegExpStackBasePointer));
+ __ mov(stack_pointer_out,
+ __ ExternalReferenceAsOperand(ref, stack_pointer_out));
+ __ sub(stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label return_eax;
@@ -666,7 +693,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
__ push(ebp);
@@ -676,41 +703,59 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
+ STATIC_ASSERT(kLastCalleeSaveRegister == kBackup_ebx);
- STATIC_ASSERT(kSuccessfulCaptures == kBackup_ebx - kSystemPointerSize);
+ STATIC_ASSERT(kSuccessfulCaptures ==
+ kLastCalleeSaveRegister - kSystemPointerSize);
__ push(Immediate(0)); // Number of successful matches in a global regexp.
STATIC_ASSERT(kStringStartMinusOne ==
kSuccessfulCaptures - kSystemPointerSize);
__ push(Immediate(0)); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(Immediate(0)); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(Immediate(0)); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is *not* callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == ecx);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), eax);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(eax, esp);
+ __ sub(eax, StaticVariable(stack_limit));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(eax, num_registers_ * kSystemPointerSize);
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
+ __ bind(&stack_limit_hit);
+ __ push(backtrack_stackpointer());
+ CallCheckStackGuardState(ebx);
+ __ pop(backtrack_stackpointer());
+ __ or_(eax, eax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_eax);
+
+ __ bind(&stack_ok);
+ }
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(ecx, esp);
- __ sub(ecx, StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kSystemPointerSize);
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
-
- __ bind(&stack_ok);
// Load start index for later use.
__ mov(ebx, Operand(ebp, kStartIndex));
@@ -735,18 +780,22 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kStringStartMinusOne), eax);
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -754,6 +803,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
+ DCHECK_EQ(ecx, backtrack_stackpointer());
+ __ push(ecx);
__ mov(ecx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
@@ -761,6 +812,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ sub(ecx, Immediate(kSystemPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kSystemPointerSize);
__ j(greater, &init_loop);
+ __ pop(ecx);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(register_location(i), eax);
@@ -768,9 +820,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -823,6 +872,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), ebx);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// edx: capture start index
@@ -855,8 +908,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
__ bind(&return_eax);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), ebx);
+
// Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
+ __ lea(esp, Operand(ebp, kLastCalleeSaveRegister));
// Restore callee-save registers.
__ pop(ebx);
__ pop(edi);
@@ -877,7 +934,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), edi);
+
__ push(edi);
CallCheckStackGuardState(ebx);
@@ -887,7 +945,9 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ j(not_zero, &return_eax);
__ pop(edi);
- __ pop(backtrack_stackpointer());
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload esi from frame.
__ mov(esi, Operand(ebp, kInputEnd));
SafeReturn();
@@ -898,21 +958,19 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Save registers before calling C function
+ // Save registers before calling C function.
__ push(esi);
__ push(edi);
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kSystemPointerSize),
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), edi);
+
+ // Call GrowStack(isolate).
+ static const int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, ebx);
+ __ mov(Operand(esp, 0 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kSystemPointerSize), eax);
- __ mov(Operand(esp, 0 * kSystemPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(ExternalReference::re_grow_stack(isolate()),
+ kNumArguments);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, eax);
@@ -1019,10 +1077,21 @@ void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
__ mov(edi, register_location(reg));
}
+void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(eax, __ ExternalReferenceAsOperand(stack_top_address, eax));
+ __ sub(eax, backtrack_stackpointer());
+ __ mov(register_location(reg), eax);
+}
void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(backtrack_stackpointer(),
+ __ ExternalReferenceAsOperand(stack_top_address,
+ backtrack_stackpointer()));
+ __ sub(backtrack_stackpointer(), register_location(reg));
}
void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
@@ -1069,14 +1138,6 @@ void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
// Private methods:
void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 93fb2c9aba3..30275036ddf 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -105,8 +105,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -114,12 +113,20 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
static const int kBackup_esi = kFramePointer - kSystemPointerSize;
static const int kBackup_edi = kBackup_esi - kSystemPointerSize;
static const int kBackup_ebx = kBackup_edi - kSystemPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kSystemPointerSize;
+ static const int kLastCalleeSaveRegister = kBackup_ebx;
+
+ static const int kSuccessfulCaptures =
+ kLastCalleeSaveRegister - kSystemPointerSize;
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -137,14 +144,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
+ static constexpr Register current_character() { return edx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
+ static constexpr Register backtrack_stackpointer() { return ecx; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -168,19 +175,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
// (ecx) and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ // are always 0..num_saved_registers_-1).
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
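
The ia32 frame constants above chain downward from the frame pointer in kSystemPointerSize steps, and the new kRegExpStackBasePointer slot slides kRegisterZero down by one word. The worked example below spells the offsets out, assuming kFramePointer == 0 (declared earlier in the header, not shown in this hunk) and 4-byte pointers on ia32; both are assumptions of the sketch, not part of the diff.

constexpr int kSystemPointerSize = 4;                              // assumed
constexpr int kFramePointer = 0;                                   // assumed
constexpr int kBackup_esi = kFramePointer - kSystemPointerSize;    // -4
constexpr int kBackup_edi = kBackup_esi - kSystemPointerSize;      // -8
constexpr int kBackup_ebx = kBackup_edi - kSystemPointerSize;      // -12
constexpr int kLastCalleeSaveRegister = kBackup_ebx;               // -12
constexpr int kSuccessfulCaptures =
    kLastCalleeSaveRegister - kSystemPointerSize;                  // -16
constexpr int kStringStartMinusOne =
    kSuccessfulCaptures - kSystemPointerSize;                      // -20
constexpr int kBacktrackCount =
    kStringStartMinusOne - kSystemPointerSize;                     // -24
constexpr int kRegExpStackBasePointer =
    kBacktrackCount - kSystemPointerSize;                          // -28
constexpr int kRegisterZero =
    kRegExpStackBasePointer - kSystemPointerSize;                  // -32

static_assert(kRegisterZero == -32,
              "one extra local shifts the register area down by one word");
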
diff --git a/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
new file mode 100644
index 00000000000..0c2b83ba88f
--- /dev/null
+++ b/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -0,0 +1,1317 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
+
+#include "src/codegen/macro-assembler.h"
+#include "src/heap/factory.h"
+#include "src/logging/log.h"
+#include "src/objects/code-inl.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded/embedded-data.h"
+
+namespace v8 {
+namespace internal {
+
+/* clang-format off
+ *
+ * This assembler uses the following register assignment convention
+ * - t3 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - a5 : Pointer to current Code object including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
+ * kStackFrameHeader
+ * --- sp when called ---
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] old-fp Old fp, callee saved.
+ * - fp[0..63] s0..s7 Callee-saved registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-32] end of input (address of end of string). kInputEnd
+ * - fp[-40] start of input (address of first character in string). kInputStart
+ * - fp[-48] start index (character index of start). kStartIndex
+ * - fp[-56] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * --------- The following output registers are 32-bit values. ---------
+ * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * int num_capture_registers,
+ * bool direct_call = false,
+ * Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
+ *
+ * clang-format on
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize;
+
+RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
+ Zone* zone, Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ DCHECK_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(a0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() {
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
+}
+
+int RegExpMacroAssemblerLOONG64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceRegister(int reg, int by) {
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
+ if (by != 0) {
+ __ Ld_d(a0, register_location(reg));
+ __ Add_d(a0, a0, Operand(by));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Backtrack() {
+ CheckPreemption();
+ if (has_backtrack_limit()) {
+ Label next;
+ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Add_d(a0, a0, Operand(1));
+ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Branch(&next, ne, a0, Operand(backtrack_limit()));
+
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fallback, so we treat it as a failed match.
+ Fail();
+ }
+
+ __ bind(&next);
+ }
+ // Pop Code offset from backtrack stack, add Code and jump to location.
+ Pop(a0);
+ __ Add_d(a0, a0, code_pointer());
+ __ Jump(a0);
+}
+
+void RegExpMacroAssemblerLOONG64::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerLOONG64::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterGT(base::uc16 limit,
+ Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset,
+ Label* on_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterLT(base::uc16 limit,
+ Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kIntSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ Label fallthrough;
+ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Sub_d(a1, a1, a0); // Length of capture.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, a4, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(a4, a4, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, a4, Operand(a3));
+ __ Sub_d(a3, a3, Operand('a'));
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub_d(a3, a3, Operand(224 - 'a'));
+ // Weren't Latin-1 letters.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2,
+ register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Sub_d(a1, a1, Operand(s3));
+ }
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
+ ExternalReference function =
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ if (read_backward) {
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+ }
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ // Find length of back-referenced capture.
+ __ Ld_d(a0, register_location(start_reg));
+ __ Ld_d(a1, register_location(start_reg + 1));
+ __ Sub_d(a1, a1, a0); // Length to check.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ // Compute pointers to match string and capture string.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ } else {
+ DCHECK(mode_ == UC16);
+ __ Ld_hu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_hu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterAnd(
+ uint32_t c, uint32_t mask, Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterMinusAnd(
+ base::uc16 c, base::uc16 minus, base::uc16 mask, Label* on_not_equal) {
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
+ __ Sub_d(a0, current_character(), Operand(minus));
+ __ And(a0, a0, Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterInRange(base::uc16 from,
+ base::uc16 to,
+ Label* on_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange(
+ base::uc16 from, base::uc16 to, Label* on_not_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Add_d(a0, a0, a1);
+ } else {
+ __ Add_d(a0, a0, current_character());
+ }
+
+ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+bool RegExpMacroAssemblerLOONG64::CheckSpecialCharacterClass(
+ base::uc16 type, Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0D.
+ __ Sub_d(a0, current_character(), Operand('\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match Latin1 digits ('0'..'9').
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non Latin1-digits.
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Fail() {
+ __ li(a0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+void RegExpMacroAssemblerLOONG64::LoadRegExpStackPointerFromMemory(
+ Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, ref);
+ __ Ld_d(dst, MemOperand(dst, 0));
+}
+
+void RegExpMacroAssemblerLOONG64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, ref);
+ __ St_d(src, MemOperand(scratch, 0));
+}
+
+void RegExpMacroAssemblerLOONG64::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, ref);
+ __ Ld_d(scratch2, MemOperand(scratch2, 0));
+ __ Sub_d(scratch2, scratch1, scratch2);
+ __ St_d(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld_d(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, ref);
+ __ Ld_d(scratch2, MemOperand(scratch2, 0));
+ __ Add_d(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
+ Label return_v0;
+ if (0 /* todo masm_->has_exception()*/) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into exit sequence right away.
+ //__ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+ // no code is generated.
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
+ // or don't save.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
+ s4.bit() | s5.bit() | s6.bit() | s7.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+
+ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+
+ __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ // TODO(plind): this 8 is the # of argument regs, should have definition.
+ __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
+ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ __ mov(a0, zero_reg);
+ __ Push(a0); // Make room for success counter and initialize it to 0.
+ STATIC_ASSERT(kStringStartMinusOne ==
+ kSuccessfulCaptures - kSystemPointerSize);
+ __ Push(a0); // Make room for "string start - 1" constant.
+ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ __ Push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ Push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Sub_d(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a0, current_input_offset(), Operand(char_size()));
+ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0);
+ __ Sub_d(a0, a0, t1);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ St_d(a0, MemOperand(a1, 0));
+ __ Add_d(a1, a1, Operand(-kPointerSize));
+ __ Sub_d(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ St_d(a0, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srli_d(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Add_d(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ Ld_d(a2, register_location(i));
+ __ Ld_d(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in t3 for the zero-length check later.
+ __ mov(t3, a2);
+ }
+ if (mode_ == UC16) {
+ __ srai_d(a2, a2, 1);
+ __ Add_d(a2, a2, a1);
+ __ srai_d(a3, a3, 1);
+ __ Add_d(a3, a3, a1);
+ } else {
+ __ Add_d(a2, a1, Operand(a2));
+ __ Add_d(a3, a1, Operand(a3));
+ }
+ // V8 expects the output to be an int32_t array.
+ __ St_w(a2, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ __ St_w(a3, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Add_d(a0, a0, 1);
+ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Sub_d(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ //__ mov(v0, a0);
+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Add_d(a2, a2, num_saved_registers_ * kIntSize);
+ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t3: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(&load_char_start_regexp, ne, current_input_offset(),
+ Operand(t3));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(a0, Operand(SUCCESS));
+ }
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+ if (global()) {
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a1, a2);
+
+ // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers s0..s7 and return (restoring ra to pc).
+ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain);
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
+
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // String might have moved: Reload end of string from frame.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers =
+ current_input_offset().bit() | current_character().bit();
+ __ MultiPush(regexp_registers);
+
+ // Call GrowStack(isolate).
+ static const int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, kNumArguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), a0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+ }
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(a0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(isolate(), &code_desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
+ return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerLOONG64::GoTo(Label* to) {
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerLOONG64::Implementation() {
+ return kLOONG64Implementation;
+}
+
+void RegExpMacroAssemblerLOONG64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) {
+ Pop(a0);
+ __ St_d(a0, register_location(register_index));
+}
+
+void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ //__ emit(0);
+ __ nop();
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int12(cp_offset)) {
+ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Add_d(a0, code_pointer(), cp_offset);
+ __ Ld_wu(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PushRegister(
+ int register_index, StackCheckFlag check_stack_limit) {
+ __ Ld_d(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) {
+ __ Ld_d(current_input_offset(), register_location(reg));
+}
+
+void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, stack_top_address);
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, backtrack_stackpointer(), a0);
+ __ St_d(a0, register_location(reg));
+}
+
+void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(backtrack_stackpointer(), stack_top_address);
+ __ Ld_d(backtrack_stackpointer(), MemOperand(backtrack_stackpointer(), 0));
+ __ Ld_d(a0, register_location(reg));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position, ge, current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerLOONG64::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ St_d(a0, register_location(register_index));
+}
+
+bool RegExpMacroAssemblerLOONG64::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(
+ int reg, int cp_offset) {
+ if (cp_offset == 0) {
+ __ St_d(current_input_offset(), register_location(reg));
+ } else {
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+// Private methods:
+
+void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
+ DCHECK(!masm_->options().isolate_independent_code);
+
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Sub_d(sp, sp, Operand(kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ St_d(scratch, MemOperand(sp, 0));
+
+ __ mov(a2, frame_pointer());
+ // Code of self.
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ // We need to make room for the return address on the stack.
+ DCHECK(IsAligned(stack_alignment, kPointerSize));
+ __ Sub_d(sp, sp, Operand(stack_alignment));
+
+ // The stack pointer now points to the cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address as
+ // argument 5. Since DirectCEntry will handle allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ __ li(t7, Operand(stack_guard_check));
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtin::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtin::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+
+ // DirectCEntry allocated space for the C argument slots, so we have to
+ // drop them together with the return address by loading the saved sp.
+ // At this point the stack looks like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ Ld_d(sp, MemOperand(sp, stack_alignment));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(
+ Address* return_address, Address raw_code, Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
+ return NativeRegExpMacroAssembler::CheckStackGuardState(
+ frame_entry<Isolate*>(re_frame, kIsolate),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int64_t>(re_frame, kDirectCall)),
+ return_address, re_code,
+ frame_entry_address<Address>(re_frame, kInputString),
+ frame_entry_address<const byte*>(re_frame, kInputStart),
+ frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == nullptr) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeCall(Label* to, Condition cond,
+ Register rs, const Operand& rt) {
+ __ Branch(to, cond, rs, rt, true);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeReturn() {
+ __ Pop(ra);
+ __ Add_d(t1, ra, Operand(masm_->CodeObject()));
+ __ Jump(t1);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Sub_d(ra, ra, Operand(masm_->CodeObject()));
+ __ Push(ra);
+}
+
+void RegExpMacroAssemblerLOONG64::Push(Register source) {
+ DCHECK(source != backtrack_stackpointer());
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(-kIntSize));
+ __ St_w(source, MemOperand(backtrack_stackpointer(), 0));
+}
+
+void RegExpMacroAssemblerLOONG64::Pop(Register target) {
+ DCHECK(target != backtrack_stackpointer());
+ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit_address(
+ masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::LoadCurrentCharacterUnchecked(
+ int cp_offset, int characters) {
+ Register offset = current_input_offset();
+
+ // If unaligned load/stores are not supported then this function must only
+ // be used to load a single character at a time.
+ if (!CanReadUnaligned()) {
+ DCHECK_EQ(1, characters);
+ }
+
+ if (cp_offset != 0) {
+ // t3 is not being used to store the capture start index at this point.
+ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t3;
+ }
+
+ if (mode_ == LATIN1) {
+ if (characters == 4) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else if (characters == 2) {
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_bu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ if (characters == 2) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
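The file above keeps backtrack state on a separate stack addressed by backtrack_stackpointer(): Push() pre-decrements by kIntSize and stores a 32-bit word, Pop() loads a word and then post-increments, and Backtrack() pops a code offset and adds it to code_pointer() before jumping. A minimal stand-alone C++ sketch of that convention (the struct and its names are illustrative, not part of V8) is:

#include <cstdint>

// Illustrative model of the 32-bit, downward-growing backtrack stack used by
// Push()/Pop() above; not V8 code, just the same pre-decrement/post-increment
// discipline expressed in plain C++.
struct BacktrackStack {
  int32_t* sp;  // plays the role of backtrack_stackpointer()

  void Push(int32_t value) {
    sp -= 1;      // Add_d(sp, sp, -kIntSize)
    *sp = value;  // St_w(source, MemOperand(sp, 0))
  }

  int32_t Pop() {
    int32_t value = *sp;  // Ld_w(target, MemOperand(sp, 0))
    sp += 1;              // Add_d(sp, sp, kIntSize)
    return value;
  }
};

Because Backtrack() resolves the popped value as an offset relative to the Code object, PushBacktrack() stores label->pos() + Code::kHeaderSize - kHeapObjectTag rather than an absolute address, which keeps the backtrack stack GC-safe when code moves.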
diff --git a/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
new file mode 100644
index 00000000000..4f1e3217fa3
--- /dev/null
+++ b/chromium/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -0,0 +1,223 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/codegen/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
+ : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
+ virtual ~RegExpMacroAssemblerLOONG64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(base::uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(base::uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward, bool unicode,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
+ base::uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(base::uc16 from, base::uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(base::uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
+ Address re_frame);
+
+ void print_regexp_frame_constants();
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+
+ // TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
+
+ static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
+ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+
+ // Initial size of code buffer.
+ static const int kRegExpCodeSize = 1024;
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The fp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ static constexpr Register current_input_offset() { return a6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ static constexpr Register current_character() { return a7; }
+
+ // Register holding address of the end of the input string.
+ static constexpr Register end_of_input_address() { return t2; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ static constexpr Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ static constexpr Register backtrack_stackpointer() { return t0; }
+
+ // Register holding pointer to the current code object.
+ static constexpr Register code_pointer() { return a5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is nullptr, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to, Condition condition, Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ const std::unique_ptr<MacroAssembler> masm_;
+
+ const NoRootArrayScope no_root_array_scope_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ const Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ const int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+ Label fallback_label_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
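The frame-slot constants declared in this header are built by chaining kSystemPointerSize-sized steps downward from frame_pointer(). A small stand-alone sketch, assuming kSystemPointerSize is 8 on this 64-bit target (the constants are copied here purely for illustration), shows how the chain resolves:

// Stand-alone evaluation of the chained frame offsets declared in the header,
// assuming 8-byte system pointers. All values are byte offsets from
// frame_pointer(); negative values lie below it.
constexpr int kSystemPointerSize = 8;
constexpr int kFramePointer = 0;
constexpr int kIsolate = kFramePointer - kSystemPointerSize;                   // -8
constexpr int kDirectCall = kIsolate - kSystemPointerSize;                     // -16
constexpr int kNumOutputRegisters = kDirectCall - kSystemPointerSize;          // -24
constexpr int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;      // -32
constexpr int kInputEnd = kRegisterOutput - kSystemPointerSize;                // -40
constexpr int kInputStart = kInputEnd - kSystemPointerSize;                    // -48
constexpr int kStartIndex = kInputStart - kSystemPointerSize;                  // -56
constexpr int kInputString = kStartIndex - kSystemPointerSize;                 // -64
constexpr int kSuccessfulCaptures = kInputString - kSystemPointerSize;         // -72
constexpr int kStringStartMinusOne = kSuccessfulCaptures - kSystemPointerSize; // -80
constexpr int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;     // -88
constexpr int kRegExpStackBasePointer = kBacktrackCount - kSystemPointerSize;  // -96
constexpr int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;    // -104

// The STATIC_ASSERTs in GetCode() rely on exactly this chaining, e.g.:
static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize, "");
static_assert(kRegisterZero == -13 * kSystemPointerSize,
              "capture registers start 13 slots below fp");

GetCode() reserves these slots in the same order: it pushes the success counter, the string-start-minus-one value, the backtrack counter, and the regexp stack base pointer one word at a time, so each STATIC_ASSERT guards that the pushed slot lines up with its named constant.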
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index db4f2480b89..5dee159bebb 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -6,14 +6,12 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -39,11 +37,9 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[60] Isolate* isolate (address of the current isolate)
- * - fp[56] direct_call (if 1, direct call from JavaScript code,
+ * - fp[56] Isolate* isolate (address of the current isolate)
+ * - fp[52] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[52] stack_area_base (High end of the memory area to use as
- * backtracking stack).
* - fp[48] capture array size (may fit multiple sets of matches)
* - fp[44] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
@@ -80,7 +76,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
@@ -95,8 +90,10 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -106,8 +103,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -119,7 +114,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -342,7 +336,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -607,6 +601,42 @@ void RegExpMacroAssemblerMIPS::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerMIPS::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Lw(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sw(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Lw(scratch2, MemOperand(scratch2));
+ __ Subu(scratch2, scratch1, scratch2);
+ __ Sw(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Lw(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, Operand(ref));
+ __ Lw(scratch2, MemOperand(scratch2));
+ __ Addu(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
@@ -624,7 +654,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
// no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -648,6 +678,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0);
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -728,7 +765,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -830,6 +867,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
__ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a0, a1);
+
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -847,6 +888,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() | backtrack_stackpointer().bit();
@@ -857,6 +899,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -866,25 +910,24 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
// Put regexp engine registers on stack first.
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate).
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, kNumArguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
@@ -976,7 +1019,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -1013,10 +1056,21 @@ void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
__ lw(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Lw(a0, MemOperand(a0));
+ __ Subu(a0, backtrack_stackpointer(), a0);
+ __ Sw(a0, register_location(reg));
+}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Lw(a0, MemOperand(a0));
__ lw(backtrack_stackpointer(), register_location(reg));
- __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
__ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
@@ -1068,14 +1122,6 @@ void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Subu(a0, backtrack_stackpointer(), a1);
- __ sw(a0, register_location(reg));
-}
-
-
bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
return false;
}
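Note on the MIPS hunks above: the patch drops the per-call stack_area_base argument and instead keeps the regexp backtrack stack pointer in per-isolate memory reached through an ExternalReference, saving it across calls as an offset from the stack's top address so the saved value stays valid if the regexp stack is reallocated and moves. Below is a minimal stand-alone C++ sketch of the arithmetic that PushRegExpBasePointer and PopRegExpBasePointer emit; the function names and plain integer types are illustrative only, not V8 API.

#include <cstdint>

// Save: record the stack pointer relative to the current stack top
// (the Subu of sp and top in the generated code).
inline intptr_t StackPointerAsOffset(intptr_t stack_pointer, intptr_t stack_top) {
  return stack_pointer - stack_top;
}

// Restore: rebase the saved offset onto whatever the stack top is now
// (the Addu of offset and top in the generated code).
inline intptr_t StackPointerFromOffset(intptr_t saved_offset, intptr_t stack_top) {
  return stack_top + saved_offset;
}

Because only this offset is stored in the kRegExpStackBasePointer frame slot, growing (and therefore relocating) the regexp stack between the push and the pop is harmless.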
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 9f85d94d652..ac69bd7a0fb 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/mips/assembler-mips.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -94,7 +92,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
@@ -103,8 +100,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
@@ -118,8 +114,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -130,7 +132,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -139,27 +140,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return t2; }
+ static constexpr Register current_input_offset() { return t2; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return t3; }
+ static constexpr Register current_character() { return t3; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t6; }
+ static constexpr Register end_of_input_address() { return t6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t4; }
+ static constexpr Register backtrack_stackpointer() { return t4; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return t1; }
+ static constexpr Register code_pointer() { return t1; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -185,19 +186,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 7e3ab11a46d..eb69ad78073 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -6,14 +6,13 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -83,19 +82,18 @@ namespace internal {
* - fp[0..63] s0..s7 Callee-saved registers s0..s7.
* --- frame pointer ----
* - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
- * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-40] end of input (address of end of string). kInputEnd
- * - fp[-48] start of input (address of first character in string). kInputStart
- * - fp[-56] start index (character index of start). kStartIndex
- * - fp[-64] void* input_string (location of a handle containing the string). kInputString
- * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-32] end of input (address of end of string). kInputEnd
+ * - fp[-40] start of input (address of first character in string). kInputStart
+ * - fp[-48] start index (character index of start). kStartIndex
+ * - fp[-56] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -114,7 +112,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
@@ -131,8 +128,10 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -142,8 +141,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -155,7 +152,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -378,7 +374,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -637,6 +633,42 @@ void RegExpMacroAssemblerMIPS::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerMIPS::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Ld(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sd(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Dsubu(scratch2, scratch1, scratch2);
+ __ Sd(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Daddu(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
@@ -654,7 +686,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
// no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -683,6 +715,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -763,7 +802,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -866,6 +905,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
__ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a0, a1);
+
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -883,6 +926,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
+
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() | backtrack_stackpointer().bit();
@@ -893,6 +938,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -902,25 +949,24 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
// Put regexp engine registers on stack first.
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Daddu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate)
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, kNumArguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
@@ -1012,7 +1058,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -1049,14 +1095,24 @@ void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
__ Ld(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Ld(a0, MemOperand(a0));
+ __ Dsubu(a0, backtrack_stackpointer(), a0);
+ __ Sd(a0, register_location(reg));
+}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Ld(a0, MemOperand(a0));
__ Ld(backtrack_stackpointer(), register_location(reg));
- __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Branch(&after_position,
@@ -1104,14 +1160,6 @@ void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Dsubu(a0, backtrack_stackpointer(), a1);
- __ Sd(a0, register_location(reg));
-}
-
-
bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
return false;
}
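The stack-overflow paths in both MIPS hunks (and the PPC one further below) now call the GrowStack runtime helper with a single argument: the generated stub first spills the current backtrack stack pointer to the per-isolate location, calls GrowStack with the isolate address, and then either treats a nullptr result as failure or adopts the returned value as the new stack pointer. A hedged plain-C++ sketch of that caller-side contract follows; the declaration of GrowStack is an assumption inferred from the call sites shown here, not the actual V8 prototype.

#include <cstdint>

struct Isolate;  // opaque in this sketch

// Assumed shape after this patch: reads the spilled stack pointer from the
// isolate's regexp stack, grows the backing store, and returns the rebased
// pointer, or nullptr if no further growth is possible.
uint8_t* GrowStack(Isolate* isolate);

// What the generated stack-overflow stub effectively does.
uint8_t* HandleBacktrackStackOverflow(Isolate* isolate,
                                      uint8_t** spilled_stack_pointer_slot,
                                      uint8_t* current_stack_pointer) {
  *spilled_stack_pointer_slot = current_stack_pointer;  // StoreRegExpStackPointerToMemory
  uint8_t* new_stack_pointer = GrowStack(isolate);      // one-argument C call
  return new_stack_pointer;  // nullptr => raise a stack-overflow exception
}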
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 348d52724b2..a6a56235cf3 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#define V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/mips64/assembler-mips64.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -96,35 +94,39 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
- // Stack parameters placed by caller.
- static const int kIsolate = kStackFrameHeader + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kPointerSize;
- static const int kStackHighEnd = kDirectCall - kPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
- static const int kInputEnd = kRegisterOutput - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -144,27 +146,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return a7; }
+ static constexpr Register current_character() { return a7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -190,19 +192,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index bb82c270b75..b7347e5fdff 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -6,14 +6,13 @@
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#include "src/base/bits.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -100,8 +99,10 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -111,8 +112,6 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
@@ -126,7 +125,6 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -362,7 +360,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -652,6 +650,42 @@ void RegExpMacroAssemblerPPC::Fail() {
__ b(&exit_label_);
}
+void RegExpMacroAssemblerPPC::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ LoadU64(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerPPC::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ StoreU64(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerPPC::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ SubS64(scratch, stack_pointer, scratch);
+ __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerPPC::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ LoadU64(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ AddS64(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
Label return_r3;
@@ -670,7 +704,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type
// is MANUAL, no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Ensure register assignments are consistent with callee save mask
DCHECK(r25.bit() & kRegExpCalleeSaved);
@@ -705,34 +739,48 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ push(r3); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(r3); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r3); // The regexp stack base ptr.
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r3, Operand(stack_limit));
- __ LoadU64(r3, MemOperand(r3));
- __ sub(r3, sp, r3, LeaveOE, SetRC);
- // Handle it if the stack pointer is already below the stack limit.
- __ ble(&stack_limit_hit, cr0);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ CmpU64(r3, Operand(num_registers_ * kSystemPointerSize), r0);
- __ bge(&stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(r3, Operand(EXCEPTION));
- __ b(&return_r3);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r3);
- __ cmpi(r3, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ bne(&return_r3);
+ // Initialize backtrack stack pointer. It must not be clobbered from here
+ // on. Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r29);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r4);
- __ bind(&stack_ok);
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r3, Operand(stack_limit));
+ __ LoadU64(r3, MemOperand(r3));
+ __ sub(r3, sp, r3, LeaveOE, SetRC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit, cr0);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ CmpU64(r3, Operand(num_registers_ * kSystemPointerSize), r0);
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(r3, Operand(EXCEPTION));
+ __ b(&return_r3);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r3);
+ __ cmpi(r3, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as
+ // result.
+ __ bne(&return_r3);
+
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AddS64(sp, sp, Operand(-num_registers_ * kSystemPointerSize), r0);
@@ -759,18 +807,21 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpi(r4, Operand::Zero());
- __ bne(&load_char_start_regexp);
- __ li(current_character(), Operand('\n'));
- __ b(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ li(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -792,10 +843,6 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ LoadU64(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
-
__ b(&start_label_);
// Exit code:
@@ -866,6 +913,10 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Prepare r3 to initialize registers with its value in the next run.
__ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r5);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r25: capture start index
@@ -896,6 +947,10 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
__ bind(&return_r3);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r5);
+
// Skip sp past regexp registers and local variables.
__ mr(sp, frame_pointer());
// Restore registers r25..r31 and return (restoring lr to pc).
@@ -916,12 +971,16 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r4);
+
CallCheckStackGuardState(r3);
__ cmpi(r3, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ bne(&return_r3);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ LoadU64(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
@@ -932,17 +991,18 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r3);
- __ mr(r3, backtrack_stackpointer());
- __ addi(r4, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r4);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ cmpi(r3, Operand::Zero());
__ beq(&exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1045,14 +1105,24 @@ void RegExpMacroAssemblerPPC::ReadCurrentPositionFromRegister(int reg) {
__ LoadU64(current_input_offset(), register_location(reg), r0);
}
+void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r4, Operand(ref));
+ __ LoadU64(r4, MemOperand(r4));
+ __ SubS64(r3, backtrack_stackpointer(), r4);
+ __ StoreU64(r3, register_location(reg));
+}
void RegExpMacroAssemblerPPC::ReadStackPointerFromRegister(int reg) {
- __ LoadU64(backtrack_stackpointer(), register_location(reg), r0);
- __ LoadU64(r3, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), r3);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r3, Operand(ref));
+ __ LoadU64(r3, MemOperand(r3));
+ __ LoadU64(backtrack_stackpointer(), register_location(reg));
+ __ AddS64(backtrack_stackpointer(), backtrack_stackpointer(), r3);
}
-
void RegExpMacroAssemblerPPC::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ CmpS64(current_input_offset(), Operand(-by * char_size()), r0);
@@ -1099,14 +1169,6 @@ void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
- __ LoadU64(r4, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r3, backtrack_stackpointer(), r4);
- __ StoreU64(r3, register_location(reg), r0);
-}
-
-
// Private methods:
void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 430c3a241c3..212a1f4051a 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
#define V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/ppc/assembler-ppc.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -91,20 +89,16 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 25..31.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 7 * kSystemPointerSize;
static const int kCallerFrame = kReturnAddress + kSystemPointerSize;
- // Stack parameters placed by caller.
- static const int kIsolate =
- kCallerFrame + kStackFrameExtraParamSlot * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -116,8 +110,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -137,27 +137,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r27; }
+ static constexpr Register current_input_offset() { return r27; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r28; }
+ static constexpr Register current_character() { return r28; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r30; }
+ static constexpr Register end_of_input_address() { return r30; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r29; }
+ static constexpr Register backtrack_stackpointer() { return r29; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r26; }
+ static constexpr Register code_pointer() { return r26; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -177,19 +177,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/regexp-ast.h b/chromium/v8/src/regexp/regexp-ast.h
index 2b9f767c24d..3f771976fde 100644
--- a/chromium/v8/src/regexp/regexp-ast.h
+++ b/chromium/v8/src/regexp/regexp-ast.h
@@ -6,10 +6,7 @@
#define V8_REGEXP_REGEXP_AST_H_
#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/objects/string.h"
-#include "src/utils/utils.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-list.h"
#include "src/zone/zone.h"
@@ -96,13 +93,14 @@ class CharacterRange {
static inline CharacterRange Singleton(base::uc32 value) {
return CharacterRange(value, value);
}
+ static constexpr int kMaxCodePoint = 0x10ffff;
static inline CharacterRange Range(base::uc32 from, base::uc32 to) {
- DCHECK(0 <= from && to <= String::kMaxCodePoint);
+ DCHECK(0 <= from && to <= kMaxCodePoint);
DCHECK(static_cast<uint32_t>(from) <= static_cast<uint32_t>(to));
return CharacterRange(from, to);
}
static inline CharacterRange Everything() {
- return CharacterRange(0, String::kMaxCodePoint);
+ return CharacterRange(0, kMaxCodePoint);
}
static inline ZoneList<CharacterRange>* List(Zone* zone,
CharacterRange range) {
@@ -566,9 +564,9 @@ class RegExpLookaround final : public RegExpTree {
class RegExpBackReference final : public RegExpTree {
public:
- explicit RegExpBackReference(JSRegExp::Flags flags)
+ explicit RegExpBackReference(RegExpFlags flags)
: capture_(nullptr), name_(nullptr), flags_(flags) {}
- RegExpBackReference(RegExpCapture* capture, JSRegExp::Flags flags)
+ RegExpBackReference(RegExpCapture* capture, RegExpFlags flags)
: capture_(capture), name_(nullptr), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
@@ -587,7 +585,7 @@ class RegExpBackReference final : public RegExpTree {
private:
RegExpCapture* capture_;
const ZoneVector<base::uc16>* name_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
};
diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator-inl.h b/chromium/v8/src/regexp/regexp-bytecode-generator-inl.h
index 2a6ffec9297..dfdc2da476f 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/chromium/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -7,7 +7,6 @@
#include "src/regexp/regexp-bytecode-generator.h"
-#include "src/ast/ast.h"
#include "src/regexp/regexp-bytecodes.h"
namespace v8 {
@@ -24,29 +23,29 @@ void RegExpBytecodeGenerator::Emit(uint32_t byte, int32_t twenty_four_bits) {
}
void RegExpBytecodeGenerator::Emit16(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 1 >= buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ + 1 >= static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<uint16_t*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<uint16_t*>(buffer_.data() + pc_) = word;
pc_ += 2;
}
void RegExpBytecodeGenerator::Emit8(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ == buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ == static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<unsigned char*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<unsigned char*>(buffer_.data() + pc_) = word;
pc_ += 1;
}
void RegExpBytecodeGenerator::Emit32(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ + 3 >= static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<uint32_t*>(buffer_.data() + pc_) = word;
pc_ += 4;
}
diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.cc b/chromium/v8/src/regexp/regexp-bytecode-generator.cc
index 397f4ba87ae..c2b34fa653f 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/chromium/v8/src/regexp/regexp-bytecode-generator.cc
@@ -5,7 +5,7 @@
#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecode-generator-inl.h"
#include "src/regexp/regexp-bytecode-peephole.h"
#include "src/regexp/regexp-bytecodes.h"
@@ -16,7 +16,7 @@ namespace internal {
RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
: RegExpMacroAssembler(isolate, zone),
- buffer_(base::Vector<byte>::New(1024)),
+ buffer_(kInitialBufferSize, zone),
pc_(0),
advance_current_end_(kInvalidPC),
jump_edges_(zone),
@@ -24,7 +24,6 @@ RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
RegExpBytecodeGenerator::~RegExpBytecodeGenerator() {
if (backtrack_.is_linked()) backtrack_.Unuse();
- buffer_.Dispose();
}
RegExpBytecodeGenerator::IrregexpImplementation
@@ -39,8 +38,8 @@ void RegExpBytecodeGenerator::Bind(Label* l) {
int pos = l->pos();
while (pos != 0) {
int fixup = pos;
- pos = *reinterpret_cast<int32_t*>(buffer_.begin() + fixup);
- *reinterpret_cast<uint32_t*>(buffer_.begin() + fixup) = pc_;
+ pos = *reinterpret_cast<int32_t*>(buffer_.data() + fixup);
+ *reinterpret_cast<uint32_t*>(buffer_.data() + fixup) = pc_;
jump_edges_.emplace(fixup, pc_);
}
}
@@ -383,7 +382,7 @@ Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Handle<ByteArray> array;
if (FLAG_regexp_peephole_optimization) {
array = RegExpBytecodePeepholeOptimization::OptimizeBytecode(
- isolate_, zone(), source, buffer_.begin(), length(), jump_edges_);
+ isolate_, zone(), source, buffer_.data(), length(), jump_edges_);
} else {
array = isolate_->factory()->NewByteArray(length());
Copy(array->GetDataStartAddress());
@@ -395,14 +394,13 @@ Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
int RegExpBytecodeGenerator::length() { return pc_; }
void RegExpBytecodeGenerator::Copy(byte* a) {
- MemCopy(a, buffer_.begin(), length());
+ MemCopy(a, buffer_.data(), length());
}
-void RegExpBytecodeGenerator::Expand() {
- base::Vector<byte> old_buffer = buffer_;
- buffer_ = base::Vector<byte>::New(old_buffer.length() * 2);
- MemCopy(buffer_.begin(), old_buffer.begin(), old_buffer.length());
- old_buffer.Dispose();
+void RegExpBytecodeGenerator::ExpandBuffer() {
+ // TODO(jgruber): The growth strategy could be smarter for large sizes.
+ // TODO(jgruber): It's not necessary to default-initialize new elements.
+ buffer_.resize(buffer_.size() * 2);
}
} // namespace internal
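The bytecode generator's buffer handling above moves from a manually grown base::Vector<byte> (Expand/Dispose) to a zone-allocated vector that simply doubles via resize, with each Emit* helper checking for space first. A small self-contained C++ illustration of the same emit-and-grow pattern, using std::vector<uint8_t> in place of V8's ZoneVector<byte> (class and member names are illustrative):

#include <cstdint>
#include <cstring>
#include <vector>

class ByteEmitter {
 public:
  void Emit16(uint16_t word) {
    // Same bounds check as RegExpBytecodeGenerator::Emit16: grow before a
    // two-byte write would run past the end of the buffer.
    if (pc_ + 1 >= buffer_.size()) ExpandBuffer();
    std::memcpy(buffer_.data() + pc_, &word, sizeof(word));
    pc_ += 2;
  }

  size_t length() const { return pc_; }

 private:
  // Doubling growth, mirroring ExpandBuffer(); a single doubling always
  // provides room because the initial size exceeds any single emit.
  void ExpandBuffer() { buffer_.resize(buffer_.size() * 2); }

  static constexpr size_t kInitialBufferSize = 1024;
  std::vector<uint8_t> buffer_ = std::vector<uint8_t>(kInitialBufferSize);
  size_t pc_ = 0;
};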
diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.h b/chromium/v8/src/regexp/regexp-bytecode-generator.h
index 466b535c7eb..551421ac7b8 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-generator.h
+++ b/chromium/v8/src/regexp/regexp-bytecode-generator.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
#include "src/base/strings.h"
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -82,7 +83,8 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
Handle<HeapObject> GetCode(Handle<String> source) override;
private:
- void Expand();
+ void ExpandBuffer();
+
// Code and bitmap emission.
inline void EmitOrLink(Label* label);
inline void Emit32(uint32_t x);
@@ -95,7 +97,9 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
void Copy(byte* a);
// The buffer into which code and relocation info are generated.
- base::Vector<byte> buffer_;
+ static constexpr int kInitialBufferSize = 1024;
+ ZoneVector<byte> buffer_;
+
// The program counter.
int pc_;
Label backtrack_;
diff --git a/chromium/v8/src/regexp/regexp-bytecode-peephole.cc b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
index fc64db90136..20de4565d28 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
+++ b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -4,10 +4,8 @@
#include "src/regexp/regexp-bytecode-peephole.h"
-#include "src/execution/isolate.h"
#include "src/flags/flags.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
diff --git a/chromium/v8/src/regexp/regexp-compiler-tonode.cc b/chromium/v8/src/regexp/regexp-compiler-tonode.cc
index f668aa6d849..b80eefae6dd 100644
--- a/chromium/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/chromium/v8/src/regexp/regexp-compiler-tonode.cc
@@ -6,14 +6,12 @@
#include "src/execution/isolate.h"
#include "src/regexp/regexp.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/base/strings.h"
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -24,6 +22,11 @@ namespace internal {
using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+constexpr int kMaxUtf16CodeUnit = 0xffff;
+constexpr uint32_t kMaxUtf16CodeUnitU = 0xffff;
+constexpr int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+
// -------------------------------------------------------------------
// Tree to graph conversion
@@ -65,7 +68,7 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
return false;
}
}
- if (range.to() != String::kMaxCodePoint) {
+ if (range.to() != kMaxCodePoint) {
return false;
}
return true;
@@ -359,8 +362,8 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
// we advanced into the middle of a surrogate pair, it will work out, as
// nothing will match from there. We will have to advance again, consuming
// the associated trail surrogate.
- ZoneList<CharacterRange>* range = CharacterRange::List(
- zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+ ZoneList<CharacterRange>* range =
+ CharacterRange::List(zone, CharacterRange::Range(0, kMaxUtf16CodeUnit));
return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
}
@@ -518,7 +521,7 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LT(first_atom, alternatives->length());
DCHECK_LE(i, alternatives->length());
DCHECK_LE(first_atom, i);
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
#ifdef V8_INTL_SUPPORT
alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
i - first_atom);
@@ -570,14 +573,14 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
#ifdef V8_INTL_SUPPORT
icu::UnicodeString new_prefix(atom->data().at(0));
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
break;
}
#else
unibrow::uchar new_prefix = atom->data().at(0);
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
new_prefix = Canonical(canonicalize, new_prefix);
@@ -658,7 +661,7 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
- const JSRegExp::Flags flags = compiler->flags();
+ const RegExpFlags flags = compiler->flags();
DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
bool contains_trail_surrogate =
@@ -740,7 +743,7 @@ namespace {
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
RegExpAssertion::AssertionType type,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
CHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
@@ -1038,7 +1041,7 @@ static void AddClassNegated(const int* elmv, int elmc,
elmc--;
DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
+ DCHECK_NE(kMaxCodePoint, elmv[elmc - 1]);
base::uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
@@ -1046,7 +1049,7 @@ static void AddClassNegated(const int* elmv, int elmc,
ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
+ ranges->Add(CharacterRange::Range(last, kMaxCodePoint), zone);
}
void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
@@ -1128,13 +1131,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 from = range.from();
- if (from > String::kMaxUtf16CodeUnit) continue;
- base::uc32 to = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (from > kMaxUtf16CodeUnit) continue;
+ base::uc32 to = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (from > String::kMaxOneByteCharCode) continue;
- if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
+ if (from > kMaxOneByteCharCode) continue;
+ if (to > kMaxOneByteCharCode) to = kMaxOneByteCharCode;
}
others.add(from, to);
}
@@ -1171,13 +1174,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- base::uc32 top = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (bottom > kMaxUtf16CodeUnit) continue;
+ base::uc32 top = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ if (bottom > kMaxOneByteCharCode) continue;
+ if (top > kMaxOneByteCharCode) top = kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
@@ -1389,9 +1392,8 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
from = range.to() + 1;
i++;
}
- if (from < String::kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
- zone);
+ if (from < kMaxCodePoint) {
+ negated_ranges->Add(CharacterRange::Range(from, kMaxCodePoint), zone);
}
}
diff --git a/chromium/v8/src/regexp/regexp-compiler.cc b/chromium/v8/src/regexp/regexp-compiler.cc
index 38a3d4447f5..5123cd138cc 100644
--- a/chromium/v8/src/regexp/regexp-compiler.cc
+++ b/chromium/v8/src/regexp/regexp-compiler.cc
@@ -6,15 +6,13 @@
#include "src/base/safe_conversions.h"
#include "src/execution/isolate.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -240,7 +238,7 @@ class RecursionCheck {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool one_byte)
+ RegExpFlags flags, bool one_byte)
: next_register_(JSRegExp::RegistersForCaptureCount(capture_count)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
@@ -1589,7 +1587,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
base::uc16 c = quarks[i];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, c, compiler->one_byte(), chars, 4);
@@ -1819,7 +1817,7 @@ class IterationDecrementer {
LoopChoiceNode* node_;
};
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1827,7 +1825,7 @@ RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, RegExpFlags flags) {
RegExpNode* next = on_success_->FilterOneByte(depth - 1, flags);
if (next == nullptr) return set_replacement(nullptr);
on_success_ = next;
@@ -1849,7 +1847,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
return false;
}
-RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* TextNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1861,7 +1859,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
base::uc16 c = quarks[j];
- if (IgnoreCase(flags)) {
+ if (IsIgnoreCase(flags)) {
c = unibrow::Latin1::TryConvertToLatin1(c);
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
@@ -1880,7 +1878,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count != 0 && ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1889,7 +1887,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1900,7 +1898,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1917,7 +1915,7 @@ RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return ChoiceNode::FilterOneByte(depth - 1, flags);
}
-RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1969,7 +1967,7 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
}
RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2321,13 +2319,13 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
TextElement elm = elements()->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, IgnoreCase(compiler->flags()))) continue;
+ if (SkipPass(pass, IsIgnoreCase(compiler->flags()))) continue;
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
base::uc16 quark = quarks[j];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
// Everywhere else we assume that a non-Latin-1 character cannot match
// a Latin-1 character. Avoid the cases where this assumption is
// invalid by using the Latin1 equivalent instead.
@@ -2491,8 +2489,8 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags) {
- if (!IgnoreCase(flags)) return;
+ RegExpFlags flags) {
+ if (!IsIgnoreCase(flags)) return;
#ifdef V8_INTL_SUPPORT
if (NeedsUnicodeCaseEquivalents(flags)) return;
#endif
@@ -3444,7 +3442,7 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IgnoreCase(flags_)) {
+ if (IsIgnoreCase(flags_)) {
bool unicode = IsUnicode(flags_);
assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
unicode, trace->backtrack());
@@ -3634,7 +3632,7 @@ class EatsAtLeastPropagator : public AllStatic {
template <typename... Propagators>
class Analysis : public NodeVisitor {
public:
- Analysis(Isolate* isolate, bool is_one_byte, JSRegExp::Flags flags)
+ Analysis(Isolate* isolate, bool is_one_byte, RegExpFlags flags)
: isolate_(isolate),
is_one_byte_(is_one_byte),
flags_(flags),
@@ -3746,14 +3744,14 @@ class Analysis : public NodeVisitor {
private:
Isolate* isolate_;
const bool is_one_byte_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node) {
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node) {
Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(
isolate, is_one_byte, flags);
DCHECK_EQ(node->info()->been_analyzed, false);
@@ -3809,7 +3807,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
return;
}
base::uc16 character = atom->data()[j];
- if (IgnoreCase(bm->compiler()->flags())) {
+ if (IsIgnoreCase(bm->compiler()->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
@@ -3874,7 +3872,7 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
}
RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
bool is_one_byte) {
// Wrap the body of the regexp in capture #0.
RegExpNode* captured_body =
diff --git a/chromium/v8/src/regexp/regexp-compiler.h b/chromium/v8/src/regexp/regexp-compiler.h
index 2be7a48e9aa..832a9662170 100644
--- a/chromium/v8/src/regexp/regexp-compiler.h
+++ b/chromium/v8/src/regexp/regexp-compiler.h
@@ -9,6 +9,7 @@
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp-nodes.h"
namespace v8 {
@@ -49,34 +50,10 @@ constexpr int kPatternTooShortForBoyerMoore = 2;
} // namespace regexp_compiler_constants
-inline bool IgnoreCase(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kIgnoreCase) != 0;
-}
-
-inline bool IsUnicode(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kUnicode) != 0;
-}
-
-inline bool IsSticky(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kSticky) != 0;
-}
-
-inline bool IsGlobal(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kGlobal) != 0;
-}
-
-inline bool DotAll(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kDotAll) != 0;
-}
-
-inline bool Multiline(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kMultiline) != 0;
-}
-
-inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+inline bool NeedsUnicodeCaseEquivalents(RegExpFlags flags) {
// Both unicode and ignore_case flags are set. We need to use ICU to find
// the closure over case equivalents.
- return IsUnicode(flags) && IgnoreCase(flags);
+ return IsUnicode(flags) && IsIgnoreCase(flags);
}
// Details of a quick mask-compare check that can look ahead in the
@@ -424,8 +401,8 @@ struct PreloadState {
// Analysis performs assertion propagation and computes eats_at_least_ values.
// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
// details.
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node);
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node);
class FrequencyCollator {
public:
@@ -475,7 +452,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool is_one_byte);
+ RegExpFlags flags, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -527,7 +504,7 @@ class RegExpCompiler {
// - Inserting the implicit .* before/after the regexp if necessary.
// - If the input is a one-byte string, filtering out nodes that can't match.
// - Fixing up regexp matches that start within a surrogate pair.
- RegExpNode* PreprocessRegExp(RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpNode* PreprocessRegExp(RegExpCompileData* data, RegExpFlags flags,
bool is_one_byte);
// If the regexp matching starts within a surrogate pair, step back to the
@@ -553,7 +530,7 @@ class RegExpCompiler {
inline void IncrementRecursionDepth() { recursion_depth_++; }
inline void DecrementRecursionDepth() { recursion_depth_--; }
- JSRegExp::Flags flags() const { return flags_; }
+ RegExpFlags flags() const { return flags_; }
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
@@ -585,7 +562,7 @@ class RegExpCompiler {
int unicode_lookaround_position_register_;
ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
bool reg_exp_too_big_;
diff --git a/chromium/v8/src/regexp/regexp-error.h b/chromium/v8/src/regexp/regexp-error.h
index 628f93638ec..6485e74bb65 100644
--- a/chromium/v8/src/regexp/regexp-error.h
+++ b/chromium/v8/src/regexp/regexp-error.h
@@ -53,6 +53,11 @@ enum class RegExpError : uint32_t {
V8_EXPORT_PRIVATE const char* RegExpErrorString(RegExpError error);
+inline constexpr bool RegExpErrorIsStackOverflow(RegExpError error) {
+ return error == RegExpError::kStackOverflow ||
+ error == RegExpError::kAnalysisStackOverflow;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/regexp/regexp-flags.h b/chromium/v8/src/regexp/regexp-flags.h
new file mode 100644
index 00000000000..b35cd7892b9
--- /dev/null
+++ b/chromium/v8/src/regexp/regexp-flags.h
@@ -0,0 +1,71 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_FLAGS_H_
+#define V8_REGEXP_REGEXP_FLAGS_H_
+
+#include "src/base/flags.h"
+#include "src/base/optional.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(jgruber,pthier): Decouple more parts of the codebase from
+// JSRegExp::Flags. Consider removing JSRegExp::Flags.
+
+// Order is important! Sorted in alphabetic order by the flag char. Note this
+// means that flag bits are shuffled. Take care to keep them contiguous when
+// adding/removing flags.
+#define REGEXP_FLAG_LIST(V) \
+ V(has_indices, HasIndices, hasIndices, 'd', 7) \
+ V(global, Global, global, 'g', 0) \
+ V(ignore_case, IgnoreCase, ignoreCase, 'i', 1) \
+ V(linear, Linear, linear, 'l', 6) \
+ V(multiline, Multiline, multiline, 'm', 2) \
+ V(dot_all, DotAll, dotAll, 's', 5) \
+ V(unicode, Unicode, unicode, 'u', 4) \
+ V(sticky, Sticky, sticky, 'y', 3)
+
+#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1 << Bit,
+enum class RegExpFlag { REGEXP_FLAG_LIST(V) };
+#undef V
+
+#define V(...) +1
+constexpr int kRegExpFlagCount = REGEXP_FLAG_LIST(V);
+#undef V
+
+// Assert alpha-sorted chars.
+#define V(Lower, Camel, LowerCamel, Char, Bit) < Char) && (Char
+static_assert((('a' - 1) REGEXP_FLAG_LIST(V) <= 'z'), "alpha-sort chars");
+#undef V
+
+// Assert contiguous indices.
+#define V(Lower, Camel, LowerCamel, Char, Bit) | (1 << Bit)
+static_assert(((1 << kRegExpFlagCount) - 1) == (0 REGEXP_FLAG_LIST(V)),
+ "contiguous bits");
+#undef V
+
+using RegExpFlags = base::Flags<RegExpFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(RegExpFlags)
+
+#define V(Lower, Camel, ...) \
+ constexpr bool Is##Camel(RegExpFlags f) { \
+ return (f & RegExpFlag::k##Camel) != 0; \
+ }
+REGEXP_FLAG_LIST(V)
+#undef V
+
+// clang-format off
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ c == Char ? RegExpFlag::k##Camel :
+constexpr base::Optional<RegExpFlag> TryRegExpFlagFromChar(char c) {
+ return REGEXP_FLAG_LIST(V) base::Optional<RegExpFlag>{};
+}
+#undef V
+// clang-format on
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_FLAGS_H_
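The new header builds everything from one X-macro: each redefinition of V(...) turns REGEXP_FLAG_LIST into enum values, a flag count, compile-time assertions, and Is* predicates. A reduced, self-contained sketch of that expansion technique, using a made-up two-flag list with simplified columns (illustration only, not the real flag set):

#include <cstdio>

#define MY_FLAG_LIST(V) \
  V(Global, 'g', 0)     \
  V(IgnoreCase, 'i', 1)

#define V(Camel, Char, Bit) k##Camel = 1 << Bit,
enum class MyFlag { MY_FLAG_LIST(V) };
#undef V

#define V(...) +1
constexpr int kMyFlagCount = MY_FLAG_LIST(V);  // Expands to "+1 +1", i.e. 2.
#undef V

#define V(Camel, Char, Bit)                                    \
  constexpr bool Is##Camel(int flags) {                        \
    return (flags & static_cast<int>(MyFlag::k##Camel)) != 0;  \
  }
MY_FLAG_LIST(V)
#undef V

int main() {
  int flags = static_cast<int>(MyFlag::kGlobal);
  std::printf("%d %d %d\n", kMyFlagCount, IsGlobal(flags), IsIgnoreCase(flags));
}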
diff --git a/chromium/v8/src/regexp/regexp-interpreter.cc b/chromium/v8/src/regexp/regexp-interpreter.cc
index 02fc3349208..be3bb45a5f0 100644
--- a/chromium/v8/src/regexp/regexp-interpreter.cc
+++ b/chromium/v8/src/regexp/regexp-interpreter.cc
@@ -6,17 +6,18 @@
#include "src/regexp/regexp-interpreter.h"
-#include "src/ast/ast.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/execution/isolate.h"
#include "src/logging/counters.h"
#include "src/objects/js-regexp-inl.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h" // For kMaximumStackSize.
#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
+#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
#ifdef V8_INTL_SUPPORT
@@ -1059,12 +1060,12 @@ IrregexpInterpreter::Result IrregexpInterpreter::Match(
if (FLAG_regexp_tier_up) regexp.TierUpTick();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
- ByteArray code_array = ByteArray::cast(regexp.Bytecode(is_one_byte));
- int total_register_count = regexp.MaxRegisterCount();
+ ByteArray code_array = ByteArray::cast(regexp.bytecode(is_one_byte));
+ int total_register_count = regexp.max_register_count();
return MatchInternal(isolate, code_array, subject_string, output_registers,
output_register_count, total_register_count,
- start_position, call_origin, regexp.BacktrackLimit());
+ start_position, call_origin, regexp.backtrack_limit());
}
IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
@@ -1110,7 +1111,7 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
// builtin.
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
Address subject, int32_t start_position, Address, Address,
- int* output_registers, int32_t output_register_count, Address,
+ int* output_registers, int32_t output_register_count,
RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp) {
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
diff --git a/chromium/v8/src/regexp/regexp-interpreter.h b/chromium/v8/src/regexp/regexp-interpreter.h
index 9b4a8c6c307..e9dedd781b5 100644
--- a/chromium/v8/src/regexp/regexp-interpreter.h
+++ b/chromium/v8/src/regexp/regexp-interpreter.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class ByteArray;
+
class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
public:
enum Result {
@@ -34,9 +36,8 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
// RETRY is returned if a retry through the runtime is needed (e.g. when
// interrupts have been scheduled or the regexp is marked for tier-up).
//
- // Arguments input_start, input_end and backtrack_stack are
- // unused. They are only passed to match the signature of the native irregex
- // code.
+ // Arguments input_start and input_end are unused. They are only passed to
+ // match the signature of the native irregex code.
//
// Arguments output_registers and output_register_count describe the results
// array, which will contain register values of all captures if SUCCESS is
@@ -45,7 +46,6 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
Address input_start, Address input_end,
int* output_registers,
int32_t output_register_count,
- Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-arch.h b/chromium/v8/src/regexp/regexp-macro-assembler-arch.h
index 5d5e3e6a44b..5d4663e3976 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-arch.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
index af148eb47ad..ca6abb4e48a 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -4,8 +4,8 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.cc b/chromium/v8/src/regexp/regexp-macro-assembler.cc
index 5457398f39a..0cd103da10e 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.cc
@@ -5,6 +5,7 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/pointer-authentication.h"
#include "src/execution/simulator.h"
@@ -22,12 +23,17 @@ namespace internal {
RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
: slow_safe_compiler_(false),
+ backtrack_limit_(JSRegExp::kNoBacktrackLimit),
global_mode_(NOT_GLOBAL),
isolate_(isolate),
zone_(zone) {}
RegExpMacroAssembler::~RegExpMacroAssembler() = default;
+bool RegExpMacroAssembler::has_backtrack_limit() const {
+ return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
+}
+
int RegExpMacroAssembler::CaseInsensitiveCompareNonUnicode(Address byte_offset1,
Address byte_offset2,
size_t byte_length,
@@ -300,23 +306,21 @@ int NativeRegExpMacroAssembler::Execute(
String input, // This needs to be the unpacked (sliced, cons) string.
int start_offset, const byte* input_start, const byte* input_end,
int* output, int output_size, Isolate* isolate, JSRegExp regexp) {
- // Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
- Address stack_base = stack_scope.stack()->stack_base();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(input);
- Code code = FromCodeT(CodeT::cast(regexp.Code(is_one_byte)));
+ Code code = FromCodeT(CodeT::cast(regexp.code(is_one_byte)));
RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
- using RegexpMatcherSig = int(
- Address input_string, int start_offset, const byte* input_start,
- const byte* input_end, int* output, int output_size, Address stack_base,
- int call_origin, Isolate* isolate, Address regexp);
+ using RegexpMatcherSig =
+ // NOLINTNEXTLINE(readability/casting)
+ int(Address input_string, int start_offset, const byte* input_start,
+ const byte* input_end, int* output, int output_size, int call_origin,
+ Isolate* isolate, Address regexp);
auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
- int result =
- fn.Call(input.ptr(), start_offset, input_start, input_end, output,
- output_size, stack_base, call_origin, isolate, regexp.ptr());
+ int result = fn.Call(input.ptr(), start_offset, input_start, input_end,
+ output, output_size, call_origin, isolate, regexp.ptr());
DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
@@ -376,22 +380,23 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
};
// clang-format on
-Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base,
- Isolate* isolate) {
+Address NativeRegExpMacroAssembler::GrowStack(Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
+
RegExpStack* regexp_stack = isolate->regexp_stack();
- size_t size = regexp_stack->stack_capacity();
- Address old_stack_base = regexp_stack->stack_base();
- DCHECK(old_stack_base == *stack_base);
- DCHECK(stack_pointer <= old_stack_base);
- DCHECK(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
- if (new_stack_base == kNullAddress) {
- return kNullAddress;
- }
- *stack_base = new_stack_base;
- intptr_t stack_content_size = old_stack_base - stack_pointer;
- return new_stack_base - stack_content_size;
+ const size_t old_size = regexp_stack->memory_size();
+
+#ifdef DEBUG
+ const Address old_stack_top = regexp_stack->memory_top();
+ const Address old_stack_pointer = regexp_stack->stack_pointer();
+ CHECK_LE(old_stack_pointer, old_stack_top);
+ CHECK_LE(static_cast<size_t>(old_stack_top - old_stack_pointer), old_size);
+#endif // DEBUG
+
+ Address new_stack_base = regexp_stack->EnsureCapacity(old_size * 2);
+ if (new_stack_base == kNullAddress) return kNullAddress;
+
+ return regexp_stack->stack_pointer();
}
} // namespace internal
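GrowStack now only asks RegExpStack::EnsureCapacity to double the backing memory and reports the relocated stack pointer, instead of threading stack_base through the caller. A rough standalone sketch of that grow-by-doubling scheme for a downward-growing stack (hypothetical types; this is not the RegExpStack implementation):

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

struct BacktrackStack {
  explicit BacktrackStack(size_t initial_capacity)
      : block_(static_cast<uint8_t*>(std::malloc(initial_capacity))),
        capacity_(initial_capacity),
        pointer_(block_ + initial_capacity) {}
  ~BacktrackStack() { std::free(block_); }

  uint8_t* base() const { return block_ + capacity_; }  // Highest address.

  // Doubles the capacity while preserving the used region just below base().
  // Returns the relocated stack pointer, or nullptr on allocation failure.
  uint8_t* Grow() {
    size_t used = static_cast<size_t>(base() - pointer_);
    size_t new_capacity = capacity_ * 2;
    uint8_t* new_block = static_cast<uint8_t*>(std::malloc(new_capacity));
    if (new_block == nullptr) return nullptr;
    uint8_t* new_base = new_block + new_capacity;
    std::memcpy(new_base - used, pointer_, used);  // Keep the live slice.
    std::free(block_);
    block_ = new_block;
    capacity_ = new_capacity;
    pointer_ = new_base - used;
    return pointer_;
  }

  uint8_t* block_;
  size_t capacity_;
  uint8_t* pointer_;  // Current top of the downward-growing stack.
};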
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.h b/chromium/v8/src/regexp/regexp-macro-assembler.h
index 31e8b1a3703..af3cc2f5caa 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.h
@@ -6,13 +6,15 @@
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#include "src/base/strings.h"
-#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class Label;
+
static const base::uc32 kLeadSurrogateStart = 0xd800;
static const base::uc32 kLeadSurrogateEnd = 0xdbff;
static const base::uc32 kTrailSurrogateStart = 0xdc00;
@@ -45,6 +47,7 @@ class RegExpMacroAssembler {
V(ARM) \
V(ARM64) \
V(MIPS) \
+ V(LOONG64) \
V(RISCV) \
V(S390) \
V(PPC) \
@@ -230,20 +233,18 @@ class RegExpMacroAssembler {
Zone* zone() const { return zone_; }
protected:
- bool has_backtrack_limit() const {
- return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
- }
+ bool has_backtrack_limit() const;
uint32_t backtrack_limit() const { return backtrack_limit_; }
bool can_fallback() const { return can_fallback_; }
private:
bool slow_safe_compiler_;
- uint32_t backtrack_limit_ = JSRegExp::kNoBacktrackLimit;
+ uint32_t backtrack_limit_;
bool can_fallback_ = false;
GlobalMode global_mode_;
- Isolate* isolate_;
- Zone* zone_;
+ Isolate* const isolate_;
+ Zone* const zone_;
};
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
@@ -280,13 +281,11 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int* offsets_vector, int offsets_vector_length,
int previous_index, Isolate* isolate);
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
+ // Called from RegExp if the backtrack stack limit is hit. Tries to expand
+ // the stack. Returns the new stack-pointer if successful, or returns 0 if
+ // unable to grow the stack.
// This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top,
- Isolate* isolate);
+ static Address GrowStack(Isolate* isolate);
static int CheckStackGuardState(Isolate* isolate, int start_index,
RegExp::CallOrigin call_origin,
diff --git a/chromium/v8/src/regexp/regexp-nodes.h b/chromium/v8/src/regexp/regexp-nodes.h
index 537cf962010..46b6f5ce21e 100644
--- a/chromium/v8/src/regexp/regexp-nodes.h
+++ b/chromium/v8/src/regexp/regexp-nodes.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_REGEXP_NODES_H_
#define V8_REGEXP_REGEXP_NODES_H_
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/zone/zone.h"
@@ -14,7 +15,6 @@ namespace internal {
class AlternativeGenerationList;
class BoyerMooreLookahead;
class GreedyLoopState;
-class Label;
class NodeVisitor;
class QuickCheckDetails;
class RegExpCompiler;
@@ -205,7 +205,7 @@ class RegExpNode : public ZoneObject {
// If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) {
+ virtual RegExpNode* FilterOneByte(int depth, RegExpFlags flags) {
return this;
}
// Helper for FilterOneByte.
@@ -296,7 +296,7 @@ class SeqRegExpNode : public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) {}
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
@@ -304,7 +304,7 @@ class SeqRegExpNode : public RegExpNode {
}
protected:
- RegExpNode* FilterSuccessor(int depth, JSRegExp::Flags flags);
+ RegExpNode* FilterSuccessor(int depth, RegExpFlags flags);
private:
RegExpNode* on_success_;
@@ -423,14 +423,14 @@ class TextNode : public SeqRegExpNode {
ZoneList<TextElement>* elements() { return elms_; }
bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags);
+ RegExpFlags flags);
int GreedyLoopTextLength() override;
RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override;
void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
int Length();
private:
@@ -498,7 +498,7 @@ class AssertionNode : public SeqRegExpNode {
class BackReferenceNode : public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ BackReferenceNode(int start_reg, int end_reg, RegExpFlags flags,
bool read_backward, RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
@@ -521,7 +521,7 @@ class BackReferenceNode : public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
- JSRegExp::Flags flags_;
+ RegExpFlags flags_;
bool read_backward_;
};
@@ -623,7 +623,7 @@ class ChoiceNode : public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
virtual bool read_backward() { return false; }
protected:
@@ -695,7 +695,7 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
return !is_first;
}
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
};
class LoopChoiceNode : public ChoiceNode {
@@ -728,7 +728,7 @@ class LoopChoiceNode : public ChoiceNode {
int min_loop_iterations() const { return min_loop_iterations_; }
bool read_backward() override { return read_backward_; }
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
private:
// AddAlternative is made private for loop nodes because alternatives
diff --git a/chromium/v8/src/regexp/regexp-parser.cc b/chromium/v8/src/regexp/regexp-parser.cc
index 1201e555ad3..4b0e554764d 100644
--- a/chromium/v8/src/regexp/regexp-parser.cc
+++ b/chromium/v8/src/regexp/regexp-parser.cc
@@ -4,12 +4,9 @@
#include "src/regexp/regexp-parser.h"
-#include <vector>
-
#include "src/execution/isolate.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
#include "src/regexp/property-sequences.h"
+#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
@@ -24,14 +21,395 @@
namespace v8 {
namespace internal {
-RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
- Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
+namespace {
+
+// Whether we're currently inside the ClassEscape production
+// (tc39.es/ecma262/#prod-annexB-CharacterEscape).
+enum class InClassEscapeState {
+ kInClass,
+ kNotInClass,
+};
+
+// A BufferedZoneList is an automatically growing list, just like (and backed
+// by) a ZoneList, that is optimized for the case of adding and removing
+// a single element. The last element added is stored outside the backing list,
+// and if no more than one element is ever added, the ZoneList isn't even
+// allocated.
+// Elements must not be nullptr pointers.
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+ BufferedZoneList() : list_(nullptr), last_(nullptr) {}
+
+ // Adds element at end of list. This element is buffered and can
+ // be read using last() or removed using RemoveLast until a new Add or until
+ // RemoveLast or GetList has been called.
+ void Add(T* value, Zone* zone) {
+ if (last_ != nullptr) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ list_->Add(last_, zone);
+ }
+ last_ = value;
+ }
+
+ T* last() {
+ DCHECK(last_ != nullptr);
+ return last_;
+ }
+
+ T* RemoveLast() {
+ DCHECK(last_ != nullptr);
+ T* result = last_;
+ if ((list_ != nullptr) && (list_->length() > 0))
+ last_ = list_->RemoveLast();
+ else
+ last_ = nullptr;
+ return result;
+ }
+
+ T* Get(int i) {
+ DCHECK((0 <= i) && (i < length()));
+ if (list_ == nullptr) {
+ DCHECK_EQ(0, i);
+ return last_;
+ } else {
+ if (i == list_->length()) {
+ DCHECK(last_ != nullptr);
+ return last_;
+ } else {
+ return list_->at(i);
+ }
+ }
+ }
+
+ void Clear() {
+ list_ = nullptr;
+ last_ = nullptr;
+ }
+
+ int length() {
+ int length = (list_ == nullptr) ? 0 : list_->length();
+ return length + ((last_ == nullptr) ? 0 : 1);
+ }
+
+ ZoneList<T*>* GetList(Zone* zone) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ if (last_ != nullptr) {
+ list_->Add(last_, zone);
+ last_ = nullptr;
+ }
+ return list_;
+ }
+
+ private:
+ ZoneList<T*>* list_;
+ T* last_;
+};
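A compact illustration of the buffering idea described in the comment above, written against std::vector with hypothetical names and no Zone allocation: the most recently added element stays outside the backing list, so single-element use never allocates it.

#include <cassert>
#include <vector>

template <typename T>
class BufferedList {
 public:
  void Add(T* value) {
    if (last_ != nullptr) list_.push_back(last_);  // First real growth point.
    last_ = value;
  }
  T* RemoveLast() {
    assert(last_ != nullptr);
    T* result = last_;
    if (!list_.empty()) {
      last_ = list_.back();
      list_.pop_back();
    } else {
      last_ = nullptr;
    }
    return result;
  }
  int length() const {
    return static_cast<int>(list_.size()) + (last_ != nullptr ? 1 : 0);
  }

 private:
  std::vector<T*> list_;  // Only populated once a second element arrives.
  T* last_ = nullptr;     // The buffered, most recently added element.
};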
+
+// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
+class RegExpBuilder : public ZoneObject {
+ public:
+ RegExpBuilder(Zone* zone, RegExpFlags flags);
+ void AddCharacter(base::uc16 character);
+ void AddUnicodeCharacter(base::uc32 character);
+ void AddEscapedUnicodeCharacter(base::uc32 character);
+ // "Adds" an empty expression. Does nothing except consume a
+ // following quantifier
+ void AddEmpty();
+ void AddCharacterClass(RegExpCharacterClass* cc);
+ void AddCharacterClassForDesugaring(base::uc32 c);
+ void AddAtom(RegExpTree* tree);
+ void AddTerm(RegExpTree* tree);
+ void AddAssertion(RegExpTree* tree);
+ void NewAlternative(); // '|'
+ bool AddQuantifierToAtom(int min, int max,
+ RegExpQuantifier::QuantifierType type);
+ void FlushText();
+ RegExpTree* ToRegExp();
+ RegExpFlags flags() const { return flags_; }
+
+ bool ignore_case() const { return IsIgnoreCase(flags_); }
+ bool multiline() const { return IsMultiline(flags_); }
+ bool dotall() const { return IsDotAll(flags_); }
+
+ private:
+ static const base::uc16 kNoPendingSurrogate = 0;
+ void AddLeadSurrogate(base::uc16 lead_surrogate);
+ void AddTrailSurrogate(base::uc16 trail_surrogate);
+ void FlushPendingSurrogate();
+ void FlushCharacters();
+ void FlushTerms();
+ bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
+ bool NeedsDesugaringForIgnoreCase(base::uc32 c);
+ Zone* zone() const { return zone_; }
+ bool unicode() const { return IsUnicode(flags_); }
+
+ Zone* const zone_;
+ bool pending_empty_;
+ const RegExpFlags flags_;
+ ZoneList<base::uc16>* characters_;
+ base::uc16 pending_surrogate_;
+ BufferedZoneList<RegExpTree, 2> terms_;
+ BufferedZoneList<RegExpTree, 2> text_;
+ BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+ enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
+
+enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAROUND,
+ NEGATIVE_LOOKAROUND,
+ GROUPING
+};
+
+class RegExpParserState : public ZoneObject {
+ public:
+ // Push a state on the stack.
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ RegExpLookaround::Type lookaround_type,
+ int disjunction_capture_index,
+ const ZoneVector<base::uc16>* capture_name,
+ RegExpFlags flags, Zone* zone)
+ : previous_state_(previous_state),
+ builder_(zone->New<RegExpBuilder>(zone, flags)),
+ group_type_(group_type),
+ lookaround_type_(lookaround_type),
+ disjunction_capture_index_(disjunction_capture_index),
+ capture_name_(capture_name) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() const { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != nullptr; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() const { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() const { return group_type_; }
+ // Lookahead or Lookbehind.
+ RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() const { return disjunction_capture_index_; }
+ // The name of the current sub-expression, if group_type is CAPTURE. Only
+ // used for named captures.
+ const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
+
+ bool IsNamedCapture() const { return capture_name_ != nullptr; }
+
+ // Check whether the parser is inside a capture group with the given index.
+ bool IsInsideCaptureGroup(int index) const {
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->group_type() != CAPTURE) continue;
+ // Return true if we found the matching capture index.
+ if (index == s->capture_index()) return true;
+ // Abort if index is larger than what has been parsed up till this state.
+ if (index > s->capture_index()) return false;
+ }
+ return false;
+ }
+
+ // Check whether the parser is inside a capture group with the given name.
+ bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name) const {
+ DCHECK_NOT_NULL(name);
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->capture_name() == nullptr) continue;
+ if (*s->capture_name() == *name) return true;
+ }
+ return false;
+ }
+
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* const previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* const builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ const SubexpressionType group_type_;
+ // Stored read direction.
+ const RegExpLookaround::Type lookaround_type_;
+ // Stored disjunction's capture index (if any).
+ const int disjunction_capture_index_;
+ // Stored capture name (if any).
+ const ZoneVector<base::uc16>* const capture_name_;
+};
+
+template <class CharT>
+class RegExpParserImpl final {
+ private:
+ RegExpParserImpl(const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone,
+ const DisallowGarbageCollection& no_gc);
+
+ bool Parse(RegExpCompileData* result);
+
+ RegExpTree* ParsePattern();
+ RegExpTree* ParseDisjunction();
+ RegExpTree* ParseGroup();
+
+ // Parses a {...,...} quantifier and stores the range in the given
+ // out parameters.
+ bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+ // Checks whether the following is a length-digit hexadecimal number,
+ // and sets the value if it is.
+ bool ParseHexEscape(int length, base::uc32* value);
+ bool ParseUnicodeEscape(base::uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
+
+ bool ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2);
+ bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2);
+
+ RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
+
+ base::uc32 ParseOctalLiteral();
+
+ // Tries to parse the input as a back reference. If successful it
+ // stores the result in the output parameter and returns true. If
+ // it fails it will push back the characters read so the same characters
+ // can be reparsed.
+ bool ParseBackReferenceIndex(int* index_out);
+
+ // Parse inside a class. Either add escaped class to the range, or return
+ // false and pass parsed single character through |char_out|.
+ void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape);
+ // Returns true iff parsing was successful.
+ bool TryParseCharacterClassEscape(base::uc32 next,
+ InClassEscapeState in_class_escape_state,
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone,
+ bool add_unicode_case_equivalents);
+ // Parses and returns a single escaped character.
+ base::uc32 ParseCharacterEscape(InClassEscapeState in_class_escape_state,
+ bool* is_escaped_unicode_character);
+
+ RegExpTree* ReportError(RegExpError error);
+ void Advance();
+ void Advance(int dist);
+ void Reset(int pos);
+
+ // Reports whether the pattern might be used as a literal search string.
+ // Only use if the result of the parse is a single atom node.
+ bool simple();
+ bool contains_anchor() { return contains_anchor_; }
+ void set_contains_anchor() { contains_anchor_ = true; }
+ int captures_started() { return captures_started_; }
+ int position() { return next_pos_ - 1; }
+ bool failed() { return failed_; }
+ bool unicode() const { return IsUnicode(top_level_flags_); }
+
+ static bool IsSyntaxCharacterOrSlash(base::uc32 c);
+
+ static const base::uc32 kEndMarker = (1 << 21);
+
+ private:
+ // Return the 1-indexed RegExpCapture object, allocate if necessary.
+ RegExpCapture* GetCapture(int index);
+
+ // Creates a new named capture at the specified index. Must be called exactly
+ // once for each named capture. Fails if a capture with the same name is
+ // encountered.
+ bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
+
+ // Parses the name of a capture group (?<name>pattern). The name must adhere
+ // to IdentifierName in the ECMAScript standard.
+ const ZoneVector<base::uc16>* ParseCaptureGroupName();
+
+ bool ParseNamedBackReference(RegExpBuilder* builder,
+ RegExpParserState* state);
+ RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
+
+ // After the initial parsing pass, patch corresponding RegExpCapture objects
+ // into all RegExpBackReferences. This is done after initial parsing in order
+ // to avoid complicating cases in which references come before the capture.

+ void PatchNamedBackReferences();
+
+ ZoneVector<RegExpCapture*>* GetNamedCaptures() const;
+
+ // Returns true iff the pattern contains named captures. May call
+ // ScanForCaptures to look ahead at the remaining pattern.
+ bool HasNamedCaptures(InClassEscapeState in_class_escape_state);
+
+ Zone* zone() const { return zone_; }
+
+ base::uc32 current() { return current_; }
+ bool has_more() { return has_more_; }
+ bool has_next() { return next_pos_ < input_length(); }
+ base::uc32 Next();
+ template <bool update_position>
+ base::uc32 ReadNext();
+ CharT InputAt(int index) const {
+ DCHECK(0 <= index && index < input_length());
+ return input_[index];
+ }
+ int input_length() const { return input_length_; }
+ void ScanForCaptures(InClassEscapeState in_class_escape_state);
+
+ struct RegExpCaptureNameLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return *lhs->name() < *rhs->name();
+ }
+ };
+
+ const DisallowGarbageCollection no_gc_;
+ Zone* const zone_;
+ RegExpError error_ = RegExpError::kNone;
+ int error_pos_ = 0;
+ ZoneList<RegExpCapture*>* captures_;
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
+ ZoneList<RegExpBackReference*>* named_back_references_;
+ const CharT* const input_;
+ const int input_length_;
+ base::uc32 current_;
+ const RegExpFlags top_level_flags_;
+ int next_pos_;
+ int captures_started_;
+ int capture_count_; // Only valid after we have scanned for captures.
+ bool has_more_;
+ bool simple_;
+ bool contains_anchor_;
+ bool is_scanned_for_captures_;
+ bool has_named_captures_; // Only valid after we have scanned for captures.
+ bool failed_;
+ const uintptr_t stack_limit_;
+
+ friend bool RegExpParser::ParseRegExpFromHeapString(Isolate*, Zone*,
+ Handle<String>,
+ RegExpFlags,
+ RegExpCompileData*);
+ friend bool RegExpParser::VerifyRegExpSyntax<CharT>(
+ Zone*, uintptr_t, const CharT*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+};
+
+template <class CharT>
+RegExpParserImpl<CharT>::RegExpParserImpl(
+ const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone, const DisallowGarbageCollection& no_gc)
+ : zone_(zone),
captures_(nullptr),
named_captures_(nullptr),
named_back_references_(nullptr),
- in_(in),
+ input_(input),
+ input_length_(input_length),
current_(kEndMarker),
top_level_flags_(flags),
next_pos_(0),
@@ -42,30 +420,44 @@ RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
contains_anchor_(false),
is_scanned_for_captures_(false),
has_named_captures_(false),
- failed_(false) {
+ failed_(false),
+ stack_limit_(stack_limit) {
Advance();
}
+template <>
+template <bool update_position>
+inline base::uc32 RegExpParserImpl<uint8_t>::ReadNext() {
+ int position = next_pos_;
+ base::uc16 c0 = InputAt(position);
+ position++;
+ DCHECK(!unibrow::Utf16::IsLeadSurrogate(c0));
+ if (update_position) next_pos_ = position;
+ return c0;
+}
+
+template <>
template <bool update_position>
-inline base::uc32 RegExpParser::ReadNext() {
+inline base::uc32 RegExpParserImpl<base::uc16>::ReadNext() {
int position = next_pos_;
- base::uc32 c0 = in()->Get(position);
+ base::uc16 c0 = InputAt(position);
+ base::uc32 result = c0;
position++;
// Read the whole surrogate pair in case of unicode flag, if possible.
- if (unicode() && position < in()->length() &&
- unibrow::Utf16::IsLeadSurrogate(static_cast<base::uc16>(c0))) {
- base::uc16 c1 = in()->Get(position);
+ if (unicode() && position < input_length() &&
+ unibrow::Utf16::IsLeadSurrogate(c0)) {
+ base::uc16 c1 = InputAt(position);
if (unibrow::Utf16::IsTrailSurrogate(c1)) {
- c0 =
- unibrow::Utf16::CombineSurrogatePair(static_cast<base::uc16>(c0), c1);
+ result = unibrow::Utf16::CombineSurrogatePair(c0, c1);
position++;
}
}
if (update_position) next_pos_ = position;
- return c0;
+ return result;
}
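The uc16 specialization above combines a lead/trail pair into a single code point via unibrow::Utf16::CombineSurrogatePair. For reference, the standard UTF-16 decoding it performs (independent of V8) looks like this:

#include <cassert>
#include <cstdint>

constexpr uint32_t Combine(uint16_t lead, uint16_t trail) {
  return 0x10000u + ((static_cast<uint32_t>(lead) - 0xD800u) << 10) +
         (static_cast<uint32_t>(trail) - 0xDC00u);
}

int main() {
  // U+1F600 is encoded as the surrogate pair 0xD83D 0xDE00.
  static_assert(Combine(0xD83D, 0xDE00) == 0x1F600, "surrogate decoding");
  assert(Combine(0xD800, 0xDC00) == 0x10000);  // First supplementary code point.
}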
-base::uc32 RegExpParser::Next() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::Next() {
if (has_next()) {
return ReadNext<false>();
} else {
@@ -73,10 +465,10 @@ base::uc32 RegExpParser::Next() {
}
}
-void RegExpParser::Advance() {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance() {
if (has_next()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
+ if (GetCurrentStackPosition() < stack_limit_) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
@@ -93,27 +485,31 @@ void RegExpParser::Advance() {
current_ = kEndMarker;
// Advance so that position() points to 1-after-the-last-character. This is
// important so that Reset() to this position works correctly.
- next_pos_ = in()->length() + 1;
+ next_pos_ = input_length() + 1;
has_more_ = false;
}
}
-
-void RegExpParser::Reset(int pos) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Reset(int pos) {
next_pos_ = pos;
- has_more_ = (pos < in()->length());
+ has_more_ = (pos < input_length());
Advance();
}
-void RegExpParser::Advance(int dist) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance(int dist) {
next_pos_ += dist - 1;
Advance();
}
+template <class CharT>
+bool RegExpParserImpl<CharT>::simple() {
+ return simple_;
+}
-bool RegExpParser::simple() { return simple_; }
-
-bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::IsSyntaxCharacterOrSlash(base::uc32 c) {
switch (c) {
case '^':
case '$':
@@ -137,14 +533,15 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
return false;
}
-RegExpTree* RegExpParser::ReportError(RegExpError error) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ReportError(RegExpError error) {
if (failed_) return nullptr; // Do not overwrite any existing error.
failed_ = true;
error_ = error;
error_pos_ = position();
// Zip to the end to make sure no more input is read.
current_ = kEndMarker;
- next_pos_ = in()->length();
+ next_pos_ = input_length();
return nullptr;
}
@@ -154,19 +551,19 @@ RegExpTree* RegExpParser::ReportError(RegExpError error) {
// Pattern ::
// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
PatchNamedBackReferences(CHECK_FAILED);
DCHECK(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+ if (result->IsAtom() && result->AsAtom()->length() == input_length()) {
simple_ = true;
}
return result;
}
-
// Disjunction ::
// Alternative
// Alternative | Disjunction
@@ -177,7 +574,8 @@ RegExpTree* RegExpParser::ParsePattern() {
// Assertion
// Atom
// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(nullptr, INITIAL, RegExpLookaround::LOOKAHEAD,
0, nullptr, top_level_flags_, zone());
@@ -187,6 +585,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
while (true) {
switch (current()) {
case kEndMarker:
+ if (failed()) return nullptr; // E.g. the initial Advance failed.
if (state->IsSubexpression()) {
// Inside a parenthesized group when hitting end of input.
return ReportError(RegExpError::kUnterminatedGroup);
@@ -220,12 +619,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
capture->set_body(body);
body = capture;
} else if (group_type == GROUPING) {
- body = zone()->New<RegExpGroup>(body);
+ body = zone()->template New<RegExpGroup>(body);
} else {
DCHECK(group_type == POSITIVE_LOOKAROUND ||
group_type == NEGATIVE_LOOKAROUND);
bool is_positive = (group_type == POSITIVE_LOOKAROUND);
- body = zone()->New<RegExpLookaround>(
+ body = zone()->template New<RegExpLookaround>(
body, is_positive, end_capture_index - capture_index,
capture_index, state->lookaround_type());
}
@@ -250,7 +649,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kNothingToRepeat);
case '^': {
Advance();
- builder->AddAssertion(zone()->New<RegExpAssertion>(
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
builder->multiline() ? RegExpAssertion::START_OF_LINE
: RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
@@ -261,13 +660,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::AssertionType assertion_type =
builder->multiline() ? RegExpAssertion::END_OF_LINE
: RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(zone()->New<RegExpAssertion>(assertion_type));
+ builder->AddAssertion(
+ zone()->template New<RegExpAssertion>(assertion_type));
continue;
}
case '.': {
Advance();
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
if (builder->dotall()) {
// Everything.
@@ -278,7 +678,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -298,68 +698,19 @@ RegExpTree* RegExpParser::ParseDisjunction() {
switch (Next()) {
case kEndMarker:
return ReportError(RegExpError::kEscapeAtEndOfPattern);
- case 'b':
- Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::NON_BOUNDARY));
- continue;
// AtomEscape ::
- // CharacterClassEscape
+ // [+UnicodeMode] DecimalEscape
+ // [~UnicodeMode] DecimalEscape but only if the CapturingGroupNumber
+ // of DecimalEscape is ≤ NcapturingParens
+ // CharacterEscape (some cases of this mixed in too)
//
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd':
- case 'D':
- case 's':
- case 'S':
- case 'w':
- case 'W': {
- base::uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
- CharacterRange::AddClassEscape(
- c, ranges, unicode() && builder->ignore_case(), zone());
- RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
- builder->AddCharacterClass(cc);
- break;
- }
- case 'p':
- case 'P': {
- base::uc32 p = Next();
- Advance(2);
- if (unicode()) {
- ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
- ZoneVector<char> name_1(zone());
- ZoneVector<char> name_2(zone());
- if (ParsePropertyClassName(&name_1, &name_2)) {
- if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
- RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
- builder->AddCharacterClass(cc);
- break;
- }
- if (p == 'p' && name_2.empty()) {
- RegExpTree* sequence = GetPropertySequence(name_1);
- if (sequence != nullptr) {
- builder->AddAtom(sequence);
- break;
- }
- }
- }
- return ReportError(RegExpError::kInvalidPropertyName);
- } else {
- builder->AddCharacter(p);
- }
- break;
- }
+ // TODO(jgruber): It may make sense to disentangle all the different
+ // cases and make the structure mirror the spec, e.g. for AtomEscape:
+ //
+ // if (TryParseDecimalEscape(...)) return;
+ // if (TryParseCharacterClassEscape(...)) return;
+ // if (TryParseCharacterEscape(...)) return;
+ // if (TryParseGroupName(...)) return;
case '1':
case '2':
case '3':
@@ -370,7 +721,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '8':
case '9': {
int index = 0;
- bool is_backref = ParseBackReferenceIndex(&index CHECK_FAILED);
+ const bool is_backref =
+ ParseBackReferenceIndex(&index CHECK_FAILED);
if (is_backref) {
if (state->IsInsideCaptureGroup(index)) {
// The back reference is inside the capture group it refers to.
@@ -381,8 +733,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddEmpty();
} else {
RegExpCapture* capture = GetCapture(index);
- RegExpTree* atom =
- zone()->New<RegExpBackReference>(capture, builder->flags());
+ RegExpTree* atom = zone()->template New<RegExpBackReference>(
+ capture, builder->flags());
builder->AddAtom(atom);
}
break;
@@ -410,99 +762,77 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter(octal);
break;
}
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
+ case 'b':
Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::BOUNDARY));
+ continue;
+ case 'B':
Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- base::uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- base::uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // Read the backslash as a literal character instead of as
- // starting an escape.
- // ES#prod-annexB-ExtendedPatternCharacter
- if (unicode()) {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidUnicodeEscape);
- }
- builder->AddCharacter('\\');
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::NON_BOUNDARY));
+ continue;
+ // AtomEscape ::
+ // CharacterClassEscape
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ case 'p':
+ case 'P': {
+ base::uc32 next = Next();
+ ZoneList<CharacterRange>* ranges =
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
+ bool add_unicode_case_equivalents =
+ unicode() && builder->ignore_case();
+ bool parsed_character_class_escape = TryParseCharacterClassEscape(
+ next, InClassEscapeState::kNotInClass, ranges, zone(),
+ add_unicode_case_equivalents CHECK_FAILED);
+
+ if (parsed_character_class_escape) {
+ RegExpCharacterClass* cc =
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
+ builder->AddCharacterClass(cc);
} else {
+ CHECK(!unicode());
Advance(2);
- builder->AddCharacter(controlLetter & 0x1F);
- }
- break;
- }
- case 'x': {
- Advance(2);
- base::uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else if (!unicode()) {
- builder->AddCharacter('x');
- } else {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidEscape);
- }
- break;
- }
- case 'u': {
- Advance(2);
- base::uc32 value;
- if (ParseUnicodeEscape(&value)) {
- builder->AddEscapedUnicodeCharacter(value);
- } else if (!unicode()) {
- builder->AddCharacter('u');
- } else {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidUnicodeEscape);
+ builder->AddCharacter(next); // IdentityEscape.
}
break;
}
- case 'k':
+ // AtomEscape ::
+ // k GroupName
+ case 'k': {
// Either an identity escape or a named back-reference. The two
// interpretations are mutually exclusive: '\k' is interpreted as
// an identity escape for non-Unicode patterns without named
// capture groups, and as the beginning of a named back-reference
// in all other cases.
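            // For illustration (assumed): in /\k<a>(?<a>x)/ the '\k<a>' is a
            // named back-reference, while /\k/ without named groups and
            // without /u simply matches the literal character 'k'.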
- if (unicode() || HasNamedCaptures()) {
+ const bool has_named_captures =
+ HasNamedCaptures(InClassEscapeState::kNotInClass CHECK_FAILED);
+ if (unicode() || has_named_captures) {
Advance(2);
ParseNamedBackReference(builder, state CHECK_FAILED);
break;
}
+ }
V8_FALLTHROUGH;
- default:
- Advance();
- // With /u, no identity escapes except for syntax characters
- // are allowed. Otherwise, all identity escapes are allowed.
- if (!unicode() || IsSyntaxCharacterOrSlash(current())) {
- builder->AddCharacter(current());
- Advance();
+ // AtomEscape ::
+ // CharacterEscape
+ default: {
+ bool is_escaped_unicode_character = false;
+ base::uc32 c = ParseCharacterEscape(
+ InClassEscapeState::kNotInClass,
+ &is_escaped_unicode_character CHECK_FAILED);
+ if (is_escaped_unicode_character) {
+ builder->AddEscapedUnicodeCharacter(c);
} else {
- return ReportError(RegExpError::kInvalidEscape);
+ builder->AddCharacter(c);
}
break;
+ }
}
break;
case '{': {
@@ -575,12 +905,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
-RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
+template <class CharT>
+RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
RegExpParserState* state) {
RegExpLookaround::Type lookaround_type = state->lookaround_type();
bool is_named_capture = false;
- JSRegExp::Flags switch_on = JSRegExp::kNone;
- JSRegExp::Flags switch_off = JSRegExp::kNone;
const ZoneVector<base::uc16>* capture_name = nullptr;
SubexpressionType subexpr_type = CAPTURE;
Advance();
@@ -623,7 +952,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
}
}
if (subexpr_type == CAPTURE) {
- if (captures_started_ >= JSRegExp::kMaxCaptures) {
+ if (captures_started_ >= RegExpMacroAssembler::kMaxRegisterCount) {
ReportError(RegExpError::kTooManyCaptures);
return nullptr;
}
@@ -633,11 +962,10 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
capture_name = ParseCaptureGroupName(CHECK_FAILED);
}
}
- JSRegExp::Flags flags = (state->builder()->flags() | switch_on) & ~switch_off;
// Store current state and begin new disjunction parsing.
- return zone()->New<RegExpParserState>(state, subexpr_type, lookaround_type,
- captures_started_, capture_name, flags,
- zone());
+ return zone()->template New<RegExpParserState>(
+ state, subexpr_type, lookaround_type, captures_started_, capture_name,
+ state->builder()->flags(), zone());
}
#ifdef DEBUG
@@ -657,18 +985,34 @@ static bool IsSpecialClassEscape(base::uc32 c) {
}
#endif
-
// In order to know whether an escape is a backreference or not we have to scan
// the entire regexp and find the number of capturing parentheses. However, we
// don't want to scan the regexp twice unless it is necessary. This mini-parser
// is called when needed. It can see the difference between capturing and
// noncapturing parentheses and can skip character classes and backslash-escaped
// characters.
-void RegExpParser::ScanForCaptures() {
+//
+// Important: The scanner has to be in a consistent state when calling
+// ScanForCaptures, e.g. not in the middle of an escape sequence '\['.
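+//
+// For illustration (assumed): when parsing /\2(a)(b)/, '\2' is seen before any
+// group has been opened; ParseBackReferenceIndex then calls ScanForCaptures to
+// count the two groups ahead, so '\2' can be treated as a back-reference
+// rather than an octal escape.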
+template <class CharT>
+void RegExpParserImpl<CharT>::ScanForCaptures(
+ InClassEscapeState in_class_escape_state) {
DCHECK(!is_scanned_for_captures_);
const int saved_position = position();
// Start with captures started previous to current position
int capture_count = captures_started();
+ // When we start inside a character class, skip everything inside the class.
+ if (in_class_escape_state == InClassEscapeState::kInClass) {
+ int c;
+ while ((c = current()) != kEndMarker) {
+ Advance();
+ if (c == '\\') {
+ Advance();
+ } else {
+ if (c == ']') break;
+ }
+ }
+ }
// Add count of captures after this position.
int n;
while ((n = current()) != kEndMarker) {
@@ -718,8 +1062,8 @@ void RegExpParser::ScanForCaptures() {
Reset(saved_position);
}
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseBackReferenceIndex(int* index_out) {
DCHECK_EQ('\\', current());
DCHECK('1' <= Next() && Next() <= '9');
// Try to parse a decimal literal that is no greater than the total number
@@ -731,7 +1075,7 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
base::uc32 c = current();
if (IsDecimalDigit(c)) {
value = 10 * value + (c - '0');
- if (value > JSRegExp::kMaxCaptures) {
+ if (value > RegExpMacroAssembler::kMaxRegisterCount) {
Reset(start);
return false;
}
@@ -741,7 +1085,8 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
}
}
if (value > captures_started()) {
- if (!is_scanned_for_captures_) ScanForCaptures();
+ if (!is_scanned_for_captures_)
+ ScanForCaptures(InClassEscapeState::kNotInClass);
if (value > capture_count_) {
Reset(start);
return false;
@@ -751,7 +1096,9 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
return true;
}
-static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
+namespace {
+
+void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
v->push_back(code_unit);
} else {
@@ -760,8 +1107,12 @@ static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
}
}
-const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
- ZoneVector<base::uc16>* name = zone()->New<ZoneVector<base::uc16>>(zone());
+} // namespace
+
+template <class CharT>
+const ZoneVector<base::uc16>* RegExpParserImpl<CharT>::ParseCaptureGroupName() {
+ ZoneVector<base::uc16>* name =
+ zone()->template New<ZoneVector<base::uc16>>(zone());
bool at_start = true;
while (true) {
@@ -805,8 +1156,9 @@ const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
return name;
}
-bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
- int index) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::CreateNamedCaptureAtIndex(
+ const ZoneVector<base::uc16>* name, int index) {
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
@@ -817,7 +1169,8 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
if (named_captures_ == nullptr) {
named_captures_ =
- zone_->New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(zone());
+ zone_->template New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(
+ zone());
} else {
// Check for duplicates and bail if we find any.
@@ -833,8 +1186,9 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
return true;
}
-bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseNamedBackReference(
+ RegExpBuilder* builder, RegExpParserState* state) {
// The parser is assumed to be on the '<' in \k<name>.
if (current() != '<') {
ReportError(RegExpError::kInvalidNamedReference);
@@ -851,14 +1205,14 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
builder->AddEmpty();
} else {
RegExpBackReference* atom =
- zone()->New<RegExpBackReference>(builder->flags());
+ zone()->template New<RegExpBackReference>(builder->flags());
atom->set_name(name);
builder->AddAtom(atom);
if (named_back_references_ == nullptr) {
named_back_references_ =
- zone()->New<ZoneList<RegExpBackReference*>>(1, zone());
+ zone()->template New<ZoneList<RegExpBackReference*>>(1, zone());
}
named_back_references_->Add(atom, zone());
}
@@ -866,7 +1220,8 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
return true;
}
-void RegExpParser::PatchNamedBackReferences() {
+template <class CharT>
+void RegExpParserImpl<CharT>::PatchNamedBackReferences() {
if (named_back_references_ == nullptr) return;
if (named_captures_ == nullptr) {
@@ -882,7 +1237,8 @@ void RegExpParser::PatchNamedBackReferences() {
// Capture used to search the named_captures_ by name, index of the
// capture is never used.
static const int kInvalidIndex = 0;
- RegExpCapture* search_capture = zone()->New<RegExpCapture>(kInvalidIndex);
+ RegExpCapture* search_capture =
+ zone()->template New<RegExpCapture>(kInvalidIndex);
DCHECK_NULL(search_capture->name());
search_capture->set_name(ref->name());
@@ -899,100 +1255,46 @@ void RegExpParser::PatchNamedBackReferences() {
}
}
-RegExpCapture* RegExpParser::GetCapture(int index) {
+template <class CharT>
+RegExpCapture* RegExpParserImpl<CharT>::GetCapture(int index) {
  // The indices for capture groups are one-based. The index in the list is
  // zero-based.
- int know_captures =
+ const int known_captures =
is_scanned_for_captures_ ? capture_count_ : captures_started_;
- DCHECK(index <= know_captures);
+ DCHECK(index <= known_captures);
if (captures_ == nullptr) {
- captures_ = zone()->New<ZoneList<RegExpCapture*>>(know_captures, zone());
+ captures_ =
+ zone()->template New<ZoneList<RegExpCapture*>>(known_captures, zone());
}
- while (captures_->length() < know_captures) {
- captures_->Add(zone()->New<RegExpCapture>(captures_->length() + 1), zone());
+ while (captures_->length() < known_captures) {
+ captures_->Add(zone()->template New<RegExpCapture>(captures_->length() + 1),
+ zone());
}
return captures_->at(index - 1);
}
-namespace {
-
-struct RegExpCaptureIndexLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return lhs->index() < rhs->index();
- }
-};
-
-} // namespace
-
-Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
+template <class CharT>
+ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() const {
if (named_captures_ == nullptr || named_captures_->empty()) {
- return Handle<FixedArray>();
+ return nullptr;
}
- // Named captures are sorted by name (because the set is used to ensure
- // name uniqueness). But the capture name map must be sorted by index.
-
- ZoneVector<RegExpCapture*> sorted_named_captures(
+ return zone()->template New<ZoneVector<RegExpCapture*>>(
named_captures_->begin(), named_captures_->end(), zone());
- std::sort(sorted_named_captures.begin(), sorted_named_captures.end(),
- RegExpCaptureIndexLess{});
- DCHECK_EQ(sorted_named_captures.size(), named_captures_->size());
-
- Factory* factory = isolate()->factory();
-
- int len = static_cast<int>(sorted_named_captures.size()) * 2;
- Handle<FixedArray> array = factory->NewFixedArray(len);
-
- int i = 0;
- for (const auto& capture : sorted_named_captures) {
- base::Vector<const base::uc16> capture_name(capture->name()->data(),
- capture->name()->size());
- // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
- // internalized so they can be used as property names in the 'exec' results.
- Handle<String> name = factory->InternalizeString(capture_name);
- array->set(i * 2, *name);
- array->set(i * 2 + 1, Smi::FromInt(capture->index()));
-
- i++;
- }
- DCHECK_EQ(i * 2, len);
-
- return array;
}
-bool RegExpParser::HasNamedCaptures() {
+template <class CharT>
+bool RegExpParserImpl<CharT>::HasNamedCaptures(
+ InClassEscapeState in_class_escape_state) {
if (has_named_captures_ || is_scanned_for_captures_) {
return has_named_captures_;
}
- ScanForCaptures();
+ ScanForCaptures(in_class_escape_state);
DCHECK(is_scanned_for_captures_);
return has_named_captures_;
}
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->group_type() != CAPTURE) continue;
- // Return true if we found the matching capture index.
- if (index == s->capture_index()) return true;
- // Abort if index is larger than what has been parsed up till this state.
- if (index > s->capture_index()) return false;
- }
- return false;
-}
-
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
- const ZoneVector<base::uc16>* name) {
- DCHECK_NOT_NULL(name);
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->capture_name() == nullptr) continue;
- if (*s->capture_name() == *name) return true;
- }
- return false;
-}
-
// QuantifierPrefix ::
// { DecimalDigits }
// { DecimalDigits , }
@@ -1000,7 +1302,9 @@ bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
//
// Returns true if parsing succeeds, and sets the min_out and max_out
// values. Values are truncated to RegExpTree::kInfinity if they overflow.
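//
// For illustration (assumed): "{2,5}" sets *min_out = 2 and *max_out = 5;
// "{3,}" sets *max_out to RegExpTree::kInfinity; "{,3}" does not match the
// production, so the parser resets to the '{' and returns false.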
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseIntervalQuantifier(int* min_out,
+ int* max_out) {
DCHECK_EQ(current(), '{');
int start = position();
Advance();
@@ -1059,7 +1363,8 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
return true;
}
-base::uc32 RegExpParser::ParseOctalLiteral() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseOctalLiteral() {
DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
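  // For illustration (assumed): "\101" yields 0x41 ('A'), while "\400" stops
  // after two digits and yields 0x20, since a third digit is only consumed
  // while the accumulated value is still below 32.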
@@ -1077,7 +1382,8 @@ base::uc32 RegExpParser::ParseOctalLiteral() {
return value;
}
-bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseHexEscape(int length, base::uc32* value) {
int start = position();
base::uc32 val = 0;
for (int i = 0; i < length; ++i) {
@@ -1095,7 +1401,8 @@ bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
}
// This parses RegExpUnicodeEscapeSequence as described in ECMA262.
-bool RegExpParser::ParseUnicodeEscape(base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnicodeEscape(base::uc32* value) {
// Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
// allowed). In the latter case, the number of hex digits between { } is
// arbitrary. \ and u have already been read.
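  // For illustration (assumed): "\u0041" yields U+0041, and with /u "\u{1F600}"
  // yields U+1F600. A malformed sequence such as "\u12" makes this return
  // false; the caller then falls back to an identity escape in non-unicode
  // mode.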
@@ -1308,10 +1615,11 @@ bool IsUnicodePropertyValueCharacter(char c) {
return (c == '_');
}
-} // anonymous namespace
+} // namespace
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
DCHECK(name_1->empty());
DCHECK(name_2->empty());
// Parse the property class as follows:
@@ -1348,10 +1656,10 @@ bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
return true;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
if (name_2.empty()) {
// First attempt to interpret as general category property value name.
const char* name = name_1.data();
@@ -1388,92 +1696,26 @@ bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
}
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
- if (!FLAG_harmony_regexp_sequence) return nullptr;
- const char* name = name_1.data();
- const base::uc32* sequence_list = nullptr;
- JSRegExp::Flags flags = JSRegExp::kUnicode;
- if (NameEquals(name, "Emoji_Flag_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiFlagSequences;
- } else if (NameEquals(name, "Emoji_Tag_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiTagSequences;
- } else if (NameEquals(name, "Emoji_ZWJ_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiZWJSequences;
- }
- if (sequence_list != nullptr) {
- // TODO(yangguo): this creates huge regexp code. Alternative to this is
- // to create a new operator that checks for these sequences at runtime.
- RegExpBuilder builder(zone(), flags);
- while (true) { // Iterate through list of sequences.
- while (*sequence_list != 0) { // Iterate through sequence.
- builder.AddUnicodeCharacter(*sequence_list);
- sequence_list++;
- }
- sequence_list++;
- if (*sequence_list == 0) break;
- builder.NewAlternative();
- }
- return builder.ToRegExp();
- }
-
- if (NameEquals(name, "Emoji_Keycap_Sequence")) {
- // https://unicode.org/reports/tr51/#def_emoji_keycap_sequence
- // emoji_keycap_sequence := [0-9#*] \x{FE0F 20E3}
- RegExpBuilder builder(zone(), flags);
- ZoneList<CharacterRange>* prefix_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
- prefix_ranges->Add(CharacterRange::Range('0', '9'), zone());
- prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
- prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
- builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), prefix_ranges));
- builder.AddCharacter(0xFE0F);
- builder.AddCharacter(0x20E3);
- return builder.ToRegExp();
- } else if (NameEquals(name, "Emoji_Modifier_Sequence")) {
- // https://unicode.org/reports/tr51/#def_emoji_modifier_sequence
- // emoji_modifier_sequence := emoji_modifier_base emoji_modifier
- RegExpBuilder builder(zone(), flags);
- ZoneList<CharacterRange>* modifier_base_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
- LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
- modifier_base_ranges, zone());
- builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_base_ranges));
- ZoneList<CharacterRange>* modifier_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
- LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
- zone());
- builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_ranges));
- return builder.ToRegExp();
- }
-
- return nullptr;
-}
-
#else // V8_INTL_SUPPORT
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
return false;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
return false;
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name) {
- return nullptr;
-}
-
#endif // V8_INTL_SUPPORT
-bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
- base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnlimitedLengthHexNumber(int max_value,
+ base::uc32* value) {
base::uc32 x = 0;
int d = base::HexValue(current());
if (d < 0) {
@@ -1491,16 +1733,21 @@ bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
return true;
}
-base::uc32 RegExpParser::ParseClassCharacterEscape() {
+// https://tc39.es/ecma262/#prod-CharacterEscape
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseCharacterEscape(
+ InClassEscapeState in_class_escape_state,
+ bool* is_escaped_unicode_character) {
DCHECK_EQ('\\', current());
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
+
Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
+
+ const base::uc32 c = current();
+ switch (c) {
+ // CharacterEscape ::
+ // ControlEscape :: one of
+ // f n r t v
case 'f':
Advance();
return '\f';
@@ -1516,12 +1763,11 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
case 'v':
Advance();
return '\v';
+ // CharacterEscape ::
+ // c ControlLetter
case 'c': {
base::uc32 controlLetter = Next();
base::uc32 letter = controlLetter & ~('A' ^ 'a');
- // Inside a character class, we also accept digits and underscore as
- // control characters, unless with /u. See Annex B:
- // ES#prod-annexB-ClassControlLetter
if (letter >= 'A' && letter <= 'Z') {
Advance(2);
// Control letters mapped to ASCII control characters in the range
@@ -1530,22 +1776,29 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
}
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- ReportError(RegExpError::kInvalidClassEscape);
+ ReportError(RegExpError::kInvalidUnicodeEscape);
return 0;
}
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_') {
- Advance(2);
- return controlLetter & 0x1F;
+ if (in_class_escape_state == InClassEscapeState::kInClass) {
+ // Inside a character class, we also accept digits and underscore as
+ // control characters, unless with /u. See Annex B:
+ // ES#prod-annexB-ClassControlLetter
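+            // For illustration (assumed): in /[\c1]/ without the u flag,
+            // '1' & 0x1F yields 0x11; with the u flag this point is never
+            // reached because the escape was already rejected above.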
+ if ((controlLetter >= '0' && controlLetter <= '9') ||
+ controlLetter == '_') {
+ Advance(2);
+ return controlLetter & 0x1F;
+ }
}
// We match JSC in reading the backslash as a literal
// character instead of as starting an escape.
- // TODO(v8:6201): Not yet covered by the spec.
return '\\';
}
+ // CharacterEscape ::
+ // 0 [lookahead ∉ DecimalDigit]
+ // [~UnicodeMode] LegacyOctalEscapeSequence
case '0':
- // With /u, \0 is interpreted as NUL if not followed by another digit.
- if (unicode() && !(Next() >= '0' && Next() <= '9')) {
+ // \0 is interpreted as NUL if not followed by another digit.
+ if (Next() < '0' || Next() > '9') {
Advance();
return 0;
}
@@ -1567,6 +1820,8 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
return 0;
}
return ParseOctalLiteral();
+ // CharacterEscape ::
+ // HexEscapeSequence
case 'x': {
Advance();
base::uc32 value;
@@ -1580,10 +1835,15 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
// as an identity escape.
return 'x';
}
+ // CharacterEscape ::
+ // RegExpUnicodeEscapeSequence [?UnicodeMode]
case 'u': {
Advance();
base::uc32 value;
- if (ParseUnicodeEscape(&value)) return value;
+ if (ParseUnicodeEscape(&value)) {
+ *is_escaped_unicode_character = true;
+ return value;
+ }
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
ReportError(RegExpError::kInvalidUnicodeEscape);
@@ -1593,72 +1853,130 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
// as an identity escape.
return 'u';
}
- default: {
- base::uc32 result = current();
- // With /u, no identity escapes except for syntax characters and '-' are
- // allowed. Otherwise, all identity escapes are allowed.
- if (!unicode() || IsSyntaxCharacterOrSlash(result) || result == '-') {
- Advance();
- return result;
- }
+ default:
+ break;
+ }
+
+ // CharacterEscape ::
+ // IdentityEscape[?UnicodeMode, ?N]
+ //
+ // * With /u, no identity escapes except for syntax characters are
+ // allowed.
+ // * Without /u:
+ // * '\c' is not an IdentityEscape.
+ // * '\k' is not an IdentityEscape when named captures exist.
+ // * Otherwise, all identity escapes are allowed.
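+  //
+  // For illustration (assumed): /\q/ matches the literal 'q' but /\q/u is a
+  // syntax error, while /\$/ is accepted in both modes because '$' is a
+  // syntax character.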
+ if (unicode()) {
+ if (!IsSyntaxCharacterOrSlash(c)) {
ReportError(RegExpError::kInvalidEscape);
return 0;
}
+ Advance();
+ return c;
+ }
+ DCHECK(!unicode());
+ if (c == 'c') {
+ ReportError(RegExpError::kInvalidEscape);
+ return 0;
+ }
+ Advance();
+ // Note: It's important to Advance before the HasNamedCaptures call s.t. we
+ // don't start scanning in the middle of an escape.
+ if (c == 'k' && HasNamedCaptures(in_class_escape_state)) {
+ ReportError(RegExpError::kInvalidEscape);
+ return 0;
+ }
+ return c;
+}
+
+// https://tc39.es/ecma262/#prod-ClassEscape
+template <class CharT>
+void RegExpParserImpl<CharT>::ParseClassEscape(
+ ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape) {
+ *is_class_escape = false;
+
+ if (current() != '\\') {
+ // Not a ClassEscape.
+ *char_out = current();
+ Advance();
+ return;
}
- UNREACHABLE();
-}
-void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
- Zone* zone,
- bool add_unicode_case_equivalents,
- base::uc32* char_out,
- bool* is_class_escape) {
- base::uc32 current_char = current();
- if (current_char == '\\') {
- switch (Next()) {
- case 'w':
- case 'W':
- case 'd':
- case 'D':
- case 's':
- case 'S': {
- CharacterRange::AddClassEscape(static_cast<char>(Next()), ranges,
- add_unicode_case_equivalents, zone);
+ const base::uc32 next = Next();
+ switch (next) {
+ case 'b':
+ *char_out = '\b';
+ Advance(2);
+ return;
+ case '-':
+ if (unicode()) {
+ *char_out = next;
Advance(2);
- *is_class_escape = true;
return;
}
- case kEndMarker:
- ReportError(RegExpError::kEscapeAtEndOfPattern);
- return;
- case 'p':
- case 'P':
- if (unicode()) {
- bool negate = Next() == 'P';
- Advance(2);
- ZoneVector<char> name_1(zone);
- ZoneVector<char> name_2(zone);
- if (!ParsePropertyClassName(&name_1, &name_2) ||
- !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
- ReportError(RegExpError::kInvalidClassPropertyName);
- }
- *is_class_escape = true;
- return;
- }
- break;
- default:
- break;
+ break;
+ case kEndMarker:
+ ReportError(RegExpError::kEscapeAtEndOfPattern);
+ return;
+ default:
+ break;
+ }
+
+ static constexpr InClassEscapeState kInClassEscape =
+ InClassEscapeState::kInClass;
+ *is_class_escape = TryParseCharacterClassEscape(
+ next, kInClassEscape, ranges, zone, add_unicode_case_equivalents);
+ if (*is_class_escape) return;
+
+ bool dummy = false; // Unused.
+ *char_out = ParseCharacterEscape(kInClassEscape, &dummy);
+}
+
+// https://tc39.es/ecma262/#prod-CharacterClassEscape
+template <class CharT>
+bool RegExpParserImpl<CharT>::TryParseCharacterClassEscape(
+ base::uc32 next, InClassEscapeState in_class_escape_state,
+ ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents) {
+ DCHECK_EQ(current(), '\\');
+ DCHECK_EQ(Next(), next);
+
+ switch (next) {
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ CharacterRange::AddClassEscape(static_cast<char>(next), ranges,
+ add_unicode_case_equivalents, zone);
+ Advance(2);
+ return true;
+ case 'p':
+ case 'P': {
+ if (!unicode()) return false;
+ bool negate = next == 'P';
+ Advance(2);
+ ZoneVector<char> name_1(zone);
+ ZoneVector<char> name_2(zone);
+ if (!ParsePropertyClassName(&name_1, &name_2) ||
+ !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
+ ReportError(in_class_escape_state == InClassEscapeState::kInClass
+ ? RegExpError::kInvalidClassPropertyName
+ : RegExpError::kInvalidPropertyName);
+ }
+ return true;
}
- *char_out = ParseClassCharacterEscape();
- *is_class_escape = false;
- } else {
- Advance();
- *char_out = current_char;
- *is_class_escape = false;
+ default:
+ return false;
}
}
-RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
+ const RegExpBuilder* builder) {
DCHECK_EQ(current(), '[');
Advance();
bool is_negated = false;
@@ -1667,7 +1985,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
}
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
bool add_unicode_case_equivalents = unicode() && builder->ignore_case();
while (has_more() && current() != ']') {
base::uc32 char_1, char_2;
@@ -1713,64 +2031,43 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return zone()->New<RegExpCharacterClass>(zone(), ranges,
- character_class_flags);
+ return zone()->template New<RegExpCharacterClass>(zone(), ranges,
+ character_class_flags);
}
-
#undef CHECK_FAILED
-bool RegExpParser::Parse(RegExpCompileData* result,
- const DisallowGarbageCollection&) {
- DCHECK(result != nullptr);
+template <class CharT>
+bool RegExpParserImpl<CharT>::Parse(RegExpCompileData* result) {
+ DCHECK_NOT_NULL(result);
RegExpTree* tree = ParsePattern();
+
if (failed()) {
- DCHECK(tree == nullptr);
- DCHECK(error_ != RegExpError::kNone);
+ DCHECK_NULL(tree);
+ DCHECK_NE(error_, RegExpError::kNone);
result->error = error_;
result->error_pos = error_pos_;
- } else {
- DCHECK(tree != nullptr);
- DCHECK(error_ == RegExpError::kNone);
- if (FLAG_trace_regexp_parser) {
- StdoutStream os;
- tree->Print(os, zone());
- os << "\n";
- }
- result->tree = tree;
- int capture_count = captures_started();
- result->simple = tree->IsAtom() && simple() && capture_count == 0;
- result->contains_anchor = contains_anchor();
- result->capture_count = capture_count;
+ return false;
}
- return !failed();
-}
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result) {
- RegExpParser parser(input, flags, isolate, zone);
- bool success;
- {
- DisallowGarbageCollection no_gc;
- success = parser.Parse(result, no_gc);
- }
- if (success) {
- result->capture_name_map = parser.CreateCaptureNameMap();
+ DCHECK_NOT_NULL(tree);
+ DCHECK_EQ(error_, RegExpError::kNone);
+ if (FLAG_trace_regexp_parser) {
+ StdoutStream os;
+ tree->Print(os, zone());
+ os << "\n";
}
- return success;
-}
-bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input,
- JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowGarbageCollection& no_gc) {
- RegExpParser parser(input, flags, isolate, zone);
- return parser.Parse(result, no_gc);
+ result->tree = tree;
+ const int capture_count = captures_started();
+ result->simple = tree->IsAtom() && simple() && capture_count == 0;
+ result->contains_anchor = contains_anchor();
+ result->capture_count = capture_count;
+ result->named_captures = GetNamedCaptures();
+ return true;
}
-RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
+RegExpBuilder::RegExpBuilder(Zone* zone, RegExpFlags flags)
: zone_(zone),
pending_empty_(false),
flags_(flags),
@@ -2054,5 +2351,58 @@ bool RegExpBuilder::AddQuantifierToAtom(
return true;
}
+template class RegExpParserImpl<uint8_t>;
+template class RegExpParserImpl<base::uc16>;
+
+} // namespace
+
+// static
+bool RegExpParser::ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input,
+ RegExpFlags flags,
+ RegExpCompileData* result) {
+ DisallowGarbageCollection no_gc;
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
+ String::FlatContent content = input->GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ base::Vector<const uint8_t> v = content.ToOneByteVector();
+ return RegExpParserImpl<uint8_t>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ } else {
+ base::Vector<const base::uc16> v = content.ToUC16Vector();
+ return RegExpParserImpl<base::uc16>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ }
+}
+
+// static
+template <class CharT>
+bool RegExpParser::VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc) {
+ return RegExpParserImpl<CharT>{input, input_length, flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+}
+
+template bool RegExpParser::VerifyRegExpSyntax<uint8_t>(
+ Zone*, uintptr_t, const uint8_t*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+template bool RegExpParser::VerifyRegExpSyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+
+// static
+bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection&) {
+ return ParseRegExpFromHeapString(isolate, zone, input, flags, result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/regexp/regexp-parser.h b/chromium/v8/src/regexp/regexp-parser.h
index 3766d43fb59..4fc64002973 100644
--- a/chromium/v8/src/regexp/regexp-parser.h
+++ b/chromium/v8/src/regexp/regexp-parser.h
@@ -5,367 +5,35 @@
#ifndef V8_REGEXP_REGEXP_PARSER_H_
#define V8_REGEXP_REGEXP_PARSER_H_
-#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/regexp/regexp-ast.h"
-#include "src/regexp/regexp-error.h"
-#include "src/zone/zone.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
-struct RegExpCompileData;
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be nullptr pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(nullptr), last_(nullptr) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value, Zone* zone) {
- if (last_ != nullptr) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- DCHECK(last_ != nullptr);
- return last_;
- }
-
- T* RemoveLast() {
- DCHECK(last_ != nullptr);
- T* result = last_;
- if ((list_ != nullptr) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = nullptr;
- return result;
- }
-
- T* Get(int i) {
- DCHECK((0 <= i) && (i < length()));
- if (list_ == nullptr) {
- DCHECK_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- DCHECK(last_ != nullptr);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = nullptr;
- last_ = nullptr;
- }
-
- int length() {
- int length = (list_ == nullptr) ? 0 : list_->length();
- return length + ((last_ == nullptr) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- if (last_ != nullptr) {
- list_->Add(last_, zone);
- last_ = nullptr;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
-
+class String;
+class Zone;
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder : public ZoneObject {
- public:
- RegExpBuilder(Zone* zone, JSRegExp::Flags flags);
- void AddCharacter(base::uc16 character);
- void AddUnicodeCharacter(base::uc32 character);
- void AddEscapedUnicodeCharacter(base::uc32 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier
- void AddEmpty();
- void AddCharacterClass(RegExpCharacterClass* cc);
- void AddCharacterClassForDesugaring(base::uc32 c);
- void AddAtom(RegExpTree* tree);
- void AddTerm(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- bool AddQuantifierToAtom(int min, int max,
- RegExpQuantifier::QuantifierType type);
- void FlushText();
- RegExpTree* ToRegExp();
- JSRegExp::Flags flags() const { return flags_; }
- void set_flags(JSRegExp::Flags flags) { flags_ = flags; }
-
- bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
- bool multiline() const { return (flags_ & JSRegExp::kMultiline) != 0; }
- bool dotall() const { return (flags_ & JSRegExp::kDotAll) != 0; }
-
- private:
- static const base::uc16 kNoPendingSurrogate = 0;
- void AddLeadSurrogate(base::uc16 lead_surrogate);
- void AddTrailSurrogate(base::uc16 trail_surrogate);
- void FlushPendingSurrogate();
- void FlushCharacters();
- void FlushTerms();
- bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
- bool NeedsDesugaringForIgnoreCase(base::uc32 c);
- Zone* zone() const { return zone_; }
- bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
-
- Zone* zone_;
- bool pending_empty_;
- JSRegExp::Flags flags_;
- ZoneList<base::uc16>* characters_;
- base::uc16 pending_surrogate_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
+struct RegExpCompileData;
-class V8_EXPORT_PRIVATE RegExpParser {
+class V8_EXPORT_PRIVATE RegExpParser : public AllStatic {
public:
- RegExpParser(FlatStringReader* in, JSRegExp::Flags flags, Isolate* isolate,
- Zone* zone);
+ static bool ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result);
- static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- JSRegExp::Flags flags, RegExpCompileData* result);
+ template <class CharT>
+ static bool VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc);
// Used by the SpiderMonkey embedding of irregexp.
static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
+ Handle<String> input, RegExpFlags flags,
RegExpCompileData* result,
- const DisallowGarbageCollection& nogc);
-
- private:
- bool Parse(RegExpCompileData* result, const DisallowGarbageCollection&);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handled specially.
- base::uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, base::uc32* value);
- bool ParseUnicodeEscape(base::uc32* value);
- bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
-
- bool ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2);
- bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2);
-
- RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
- RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
-
- base::uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- // Parse inside a class. Either add escaped class to the range, or return
- // false and pass parsed single character through |char_out|.
- void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
- bool add_unicode_case_equivalents, base::uc32* char_out,
- bool* is_class_escape);
-
- char ParseClassEscape();
-
- RegExpTree* ReportError(RegExpError error);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_started_; }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
- // The Unicode flag can't be changed using in-regexp syntax, so it's OK to
- // just read the initial flag value here.
- bool unicode() const { return (top_level_flags_ & JSRegExp::kUnicode) != 0; }
-
- static bool IsSyntaxCharacterOrSlash(base::uc32 c);
-
- static const base::uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAROUND,
- NEGATIVE_LOOKAROUND,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- // Push a state on the stack.
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- RegExpLookaround::Type lookaround_type,
- int disjunction_capture_index,
- const ZoneVector<base::uc16>* capture_name,
- JSRegExp::Flags flags, Zone* zone)
- : previous_state_(previous_state),
- builder_(zone->New<RegExpBuilder>(zone, flags)),
- group_type_(group_type),
- lookaround_type_(lookaround_type),
- disjunction_capture_index_(disjunction_capture_index),
- capture_name_(capture_name) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() const { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != nullptr; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() const { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() const { return group_type_; }
- // Lookahead or Lookbehind.
- RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() const { return disjunction_capture_index_; }
- // The name of the current sub-expression, if group_type is CAPTURE. Only
- // used for named captures.
- const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
-
- bool IsNamedCapture() const { return capture_name_ != nullptr; }
-
- // Check whether the parser is inside a capture group with the given index.
- bool IsInsideCaptureGroup(int index);
- // Check whether the parser is inside a capture group with the given name.
- bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name);
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* const previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* const builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- const SubexpressionType group_type_;
- // Stored read direction.
- const RegExpLookaround::Type lookaround_type_;
- // Stored disjunction's capture index (if any).
- const int disjunction_capture_index_;
- // Stored capture name (if any).
- const ZoneVector<base::uc16>* const capture_name_;
- };
-
- // Return the 1-indexed RegExpCapture object, allocate if necessary.
- RegExpCapture* GetCapture(int index);
-
- // Creates a new named capture at the specified index. Must be called exactly
- // once for each named capture. Fails if a capture with the same name is
- // encountered.
- bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
-
- // Parses the name of a capture group (?<name>pattern). The name must adhere
- // to IdentifierName in the ECMAScript standard.
- const ZoneVector<base::uc16>* ParseCaptureGroupName();
-
- bool ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state);
- RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
-
- // After the initial parsing pass, patch corresponding RegExpCapture objects
- // into all RegExpBackReferences. This is done after initial parsing in order
- // to avoid complicating cases in which references come before the capture.
- void PatchNamedBackReferences();
-
- Handle<FixedArray> CreateCaptureNameMap();
-
- // Returns true iff the pattern contains named captures. May call
- // ScanForCaptures to look ahead at the remaining pattern.
- bool HasNamedCaptures();
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- base::uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- base::uc32 Next();
- template <bool update_position>
- base::uc32 ReadNext();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- struct RegExpCaptureNameLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return *lhs->name() < *rhs->name();
- }
- };
-
- Isolate* isolate_;
- Zone* zone_;
- RegExpError error_ = RegExpError::kNone;
- int error_pos_ = 0;
- ZoneList<RegExpCapture*>* captures_;
- ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
- ZoneList<RegExpBackReference*>* named_back_references_;
- FlatStringReader* in_;
- base::uc32 current_;
- // These are the flags specified outside the regexp syntax ie after the
- // terminating '/' or in the second argument to the constructor. The current
- // flags are stored on the RegExpBuilder.
- JSRegExp::Flags top_level_flags_;
- int next_pos_;
- int captures_started_;
- int capture_count_; // Only valid after we have scanned for captures.
- bool has_more_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool has_named_captures_; // Only valid after we have scanned for captures.
- bool failed_;
+ const DisallowGarbageCollection& no_gc);
};
} // namespace internal
diff --git a/chromium/v8/src/regexp/regexp-stack.cc b/chromium/v8/src/regexp/regexp-stack.cc
index 6d73b7c03d6..9c403eed089 100644
--- a/chromium/v8/src/regexp/regexp-stack.cc
+++ b/chromium/v8/src/regexp/regexp-stack.cc
@@ -11,23 +11,17 @@ namespace v8 {
namespace internal {
RegExpStackScope::RegExpStackScope(Isolate* isolate)
- : regexp_stack_(isolate->regexp_stack()) {
+ : regexp_stack_(isolate->regexp_stack()),
+ old_sp_top_delta_(regexp_stack_->sp_top_delta()) {
DCHECK(regexp_stack_->IsValid());
- // Irregexp is not reentrant in several ways; in particular, the
- // RegExpStackScope is not reentrant since the destructor frees allocated
- // memory. Protect against reentrancy here.
- CHECK(!regexp_stack_->is_in_use());
- regexp_stack_->set_is_in_use(true);
}
-
RegExpStackScope::~RegExpStackScope() {
- // Reset the buffer if it has grown.
- regexp_stack_->Reset();
- DCHECK(!regexp_stack_->is_in_use());
+ CHECK_EQ(old_sp_top_delta_, regexp_stack_->sp_top_delta());
+ regexp_stack_->ResetIfEmpty();
}
-RegExpStack::RegExpStack() : thread_local_(this), isolate_(nullptr) {}
+RegExpStack::RegExpStack() : thread_local_(this) {}
RegExpStack::~RegExpStack() { thread_local_.FreeAndInvalidate(); }
@@ -52,18 +46,16 @@ char* RegExpStack::RestoreStack(char* from) {
return from + kThreadLocalSize;
}
-void RegExpStack::Reset() { thread_local_.ResetToStaticStack(this); }
-
void RegExpStack::ThreadLocal::ResetToStaticStack(RegExpStack* regexp_stack) {
if (owns_memory_) DeleteArray(memory_);
memory_ = regexp_stack->static_stack_;
memory_top_ = regexp_stack->static_stack_ + kStaticStackSize;
memory_size_ = kStaticStackSize;
+ stack_pointer_ = memory_top_;
limit_ = reinterpret_cast<Address>(regexp_stack->static_stack_) +
kStackLimitSlack * kSystemPointerSize;
owns_memory_ = false;
- is_in_use_ = false;
}
void RegExpStack::ThreadLocal::FreeAndInvalidate() {
@@ -74,6 +66,7 @@ void RegExpStack::ThreadLocal::FreeAndInvalidate() {
memory_ = nullptr;
memory_top_ = nullptr;
memory_size_ = 0;
+ stack_pointer_ = nullptr;
limit_ = kMemoryTop;
}
@@ -88,9 +81,11 @@ Address RegExpStack::EnsureCapacity(size_t size) {
thread_local_.memory_, thread_local_.memory_size_);
if (thread_local_.owns_memory_) DeleteArray(thread_local_.memory_);
}
+ ptrdiff_t delta = sp_top_delta();
thread_local_.memory_ = new_memory;
thread_local_.memory_top_ = new_memory + size;
thread_local_.memory_size_ = size;
+ thread_local_.stack_pointer_ = thread_local_.memory_top_ + delta;
thread_local_.limit_ = reinterpret_cast<Address>(new_memory) +
kStackLimitSlack * kSystemPointerSize;
thread_local_.owns_memory_ = true;
diff --git a/chromium/v8/src/regexp/regexp-stack.h b/chromium/v8/src/regexp/regexp-stack.h
index adca683ff89..d52ca3e1d07 100644
--- a/chromium/v8/src/regexp/regexp-stack.h
+++ b/chromium/v8/src/regexp/regexp-stack.h
@@ -16,10 +16,7 @@ class RegExpStack;
// Maintains a per-v8thread stack area that can be used by irregexp
// implementation for its backtracking stack.
-// Since there is only one stack area, the Irregexp implementation is not
-// re-entrant. I.e., no regular expressions may be executed in the same thread
-// during a preempted Irregexp execution.
-class V8_NODISCARD RegExpStackScope {
+class V8_NODISCARD RegExpStackScope final {
public:
// Create and delete an instance to control the life-time of a growing stack.
@@ -32,46 +29,45 @@ class V8_NODISCARD RegExpStackScope {
RegExpStack* stack() const { return regexp_stack_; }
private:
- RegExpStack* regexp_stack_;
+ RegExpStack* const regexp_stack_;
+ const ptrdiff_t old_sp_top_delta_;
};
-class RegExpStack {
+class RegExpStack final {
public:
RegExpStack();
~RegExpStack();
RegExpStack(const RegExpStack&) = delete;
RegExpStack& operator=(const RegExpStack&) = delete;
- // Number of allocated locations on the stack below the limit.
- // No sequence of pushes must be longer that this without doing a stack-limit
- // check.
+ // Number of allocated locations on the stack below the limit. No sequence of
+ // pushes must be longer than this without doing a stack-limit check.
static constexpr int kStackLimitSlack = 32;
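  // For illustration (assumed): the limit sits kStackLimitSlack system-pointer
  // slots above the low end of the buffer, so after a successful limit check a
  // run of up to 32 unchecked pushes cannot run past the start of the stack.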
- // Gives the top of the memory used as stack.
- Address stack_base() {
+ Address memory_top() const {
DCHECK_NE(0, thread_local_.memory_size_);
DCHECK_EQ(thread_local_.memory_top_,
thread_local_.memory_ + thread_local_.memory_size_);
return reinterpret_cast<Address>(thread_local_.memory_top_);
}
- // The total size of the memory allocated for the stack.
- size_t stack_capacity() { return thread_local_.memory_size_; }
+ Address stack_pointer() const {
+ return reinterpret_cast<Address>(thread_local_.stack_pointer_);
+ }
+
+ size_t memory_size() const { return thread_local_.memory_size_; }
// If the stack pointer gets below the limit, we should react and
// either grow the stack or report an out-of-stack exception.
// There is only a limited number of locations below the stack limit,
// so users of the stack should check the stack limit during any
// sequence of pushes longer than this.
- Address* limit_address_address() { return &(thread_local_.limit_); }
+ Address* limit_address_address() { return &thread_local_.limit_; }
// Ensures that there is a memory area with at least the specified size.
// If passing zero, the default/minimum size buffer is allocated.
Address EnsureCapacity(size_t size);
- bool is_in_use() const { return thread_local_.is_in_use_; }
- void set_is_in_use(bool v) { thread_local_.is_in_use_ = v; }
-
// Thread local archiving.
static constexpr int ArchiveSpacePerThread() {
return static_cast<int>(kThreadLocalSize);
@@ -103,44 +99,59 @@ class RegExpStack {
STATIC_ASSERT(kStaticStackSize <= kMaximumStackSize);
- // Structure holding the allocated memory, size and limit.
+ // Structure holding the allocated memory, size and limit. Thread switching
+ // archives and restores this struct.
struct ThreadLocal {
explicit ThreadLocal(RegExpStack* regexp_stack) {
ResetToStaticStack(regexp_stack);
}
- // If memory_size_ > 0 then memory_ and memory_top_ must be non-nullptr
- // and memory_top_ = memory_ + memory_size_
+ // If memory_size_ > 0 then
+ // - memory_, memory_top_, stack_pointer_ must be non-nullptr
+ // - memory_top_ = memory_ + memory_size_
+ // - memory_ <= stack_pointer_ <= memory_top_
byte* memory_ = nullptr;
byte* memory_top_ = nullptr;
size_t memory_size_ = 0;
+ byte* stack_pointer_ = nullptr;
Address limit_ = kNullAddress;
bool owns_memory_ = false; // Whether memory_ is owned and must be freed.
- bool is_in_use_ = false; // To guard against reentrancy.
void ResetToStaticStack(RegExpStack* regexp_stack);
+ void ResetToStaticStackIfEmpty(RegExpStack* regexp_stack) {
+ if (stack_pointer_ == memory_top_) ResetToStaticStack(regexp_stack);
+ }
void FreeAndInvalidate();
};
static constexpr size_t kThreadLocalSize = sizeof(ThreadLocal);
- // Address of top of memory used as stack.
Address memory_top_address_address() {
return reinterpret_cast<Address>(&thread_local_.memory_top_);
}
- // Resets the buffer if it has grown beyond the default/minimum size.
- // After this, the buffer is either the default size, or it is empty, so
- // you have to call EnsureCapacity before using it again.
- void Reset();
+ Address stack_pointer_address() {
+ return reinterpret_cast<Address>(&thread_local_.stack_pointer_);
+ }
+
+ // A position-independent representation of the stack pointer.
+ ptrdiff_t sp_top_delta() const {
+ ptrdiff_t result =
+ reinterpret_cast<intptr_t>(thread_local_.stack_pointer_) -
+ reinterpret_cast<intptr_t>(thread_local_.memory_top_);
+ DCHECK_LE(result, 0);
+ return result;
+ }
+
+ // Resets the buffer if it has grown beyond the default/minimum size and is
+ // empty.
+ void ResetIfEmpty() { thread_local_.ResetToStaticStackIfEmpty(this); }
// Whether the ThreadLocal storage has been invalidated.
bool IsValid() const { return thread_local_.memory_ != nullptr; }
ThreadLocal thread_local_;
- Isolate* isolate_;
friend class ExternalReference;
- friend class Isolate;
friend class RegExpStackScope;
};
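The regexp-stack change above replaces the old is_in_use_ reentrancy flag with an explicitly tracked stack_pointer_ and a position-independent delta from memory_top_, so the logical stack position survives reallocation in EnsureCapacity as well as the reset-if-empty path. The following is a minimal standalone sketch of that idea only; GrowingDownwardStack and its members are illustrative names, not V8 API, and the layout (live data at the top of the buffer, pointer captured as a non-positive delta) is an assumption matching the hunks above.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative stand-in for RegExpStack (not V8 API): a downward-growing
// stack whose pointer can be captured as a non-positive delta from the top
// of the buffer, so it stays meaningful when the buffer is reallocated.
class GrowingDownwardStack {
 public:
  explicit GrowingDownwardStack(size_t size) : buffer_(size) { sp_ = top(); }

  uint8_t* top() { return buffer_.data() + buffer_.size(); }

  // Position-independent representation of the stack pointer; always <= 0.
  ptrdiff_t sp_top_delta() { return sp_ - top(); }

  void Push(uint8_t value) {
    assert(sp_ > buffer_.data());  // The real code grows the stack instead.
    *--sp_ = value;
  }

  // Grow while preserving the logical position: live data sits at the top of
  // the old buffer, so it is copied to the top of the new buffer and the
  // pointer is re-derived from the saved delta.
  void Grow(size_t new_size) {
    assert(new_size > buffer_.size());
    const ptrdiff_t delta = sp_top_delta();
    std::vector<uint8_t> grown(new_size);
    std::memcpy(grown.data() + new_size - buffer_.size(), buffer_.data(),
                buffer_.size());
    buffer_ = std::move(grown);
    sp_ = top() + delta;
  }

  // Analogue of ResetIfEmpty(): shrinking is only safe when nothing is live.
  bool IsEmpty() { return sp_ == top(); }

 private:
  std::vector<uint8_t> buffer_;
  uint8_t* sp_;
};

int main() {
  GrowingDownwardStack stack(16);
  stack.Push(1);
  stack.Push(2);
  const ptrdiff_t delta_before = stack.sp_top_delta();
  stack.Grow(64);
  assert(stack.sp_top_delta() == delta_before);  // Preserved across Grow().
  assert(!stack.IsEmpty());
  return 0;
}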
diff --git a/chromium/v8/src/regexp/regexp-utils.cc b/chromium/v8/src/regexp/regexp-utils.cc
index 1e72a124c95..dabe5ee4a28 100644
--- a/chromium/v8/src/regexp/regexp-utils.cc
+++ b/chromium/v8/src/regexp/regexp-utils.cc
@@ -120,33 +120,6 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
}
}
-Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
- if (!object->IsJSReceiver()) return Just(false);
-
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- Handle<Object> match;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, match,
- JSObject::GetProperty(isolate, receiver,
- isolate->factory()->match_symbol()),
- Nothing<bool>());
-
- if (!match->IsUndefined(isolate)) {
- const bool match_as_boolean = match->BooleanValue(isolate);
-
- if (match_as_boolean && !object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp);
- } else if (!match_as_boolean && object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp);
- }
-
- return Just(match_as_boolean);
- }
-
- return Just(object->IsJSRegExp());
-}
-
bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
diff --git a/chromium/v8/src/regexp/regexp-utils.h b/chromium/v8/src/regexp/regexp-utils.h
index 19f1f240399..c0333fb170e 100644
--- a/chromium/v8/src/regexp/regexp-utils.h
+++ b/chromium/v8/src/regexp/regexp-utils.h
@@ -5,12 +5,15 @@
#ifndef V8_REGEXP_REGEXP_UTILS_H_
#define V8_REGEXP_REGEXP_UTILS_H_
-#include "src/objects/objects.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
+class JSReceiver;
+class Object;
class RegExpMatchInfo;
+class String;
// Helper methods for C++ regexp builtins.
class RegExpUtils : public AllStatic {
@@ -31,10 +34,6 @@ class RegExpUtils : public AllStatic {
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
Handle<Object> exec);
- // ES#sec-isregexp IsRegExp ( argument )
- // Includes checking of the match property.
- static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
-
// Checks whether the given object is an unmodified JSRegExp instance.
// Neither the object's map, nor its prototype's map, nor any relevant
// method on the prototype may be modified.
diff --git a/chromium/v8/src/regexp/regexp.cc b/chromium/v8/src/regexp/regexp.cc
index 9bdebe1918f..d739f0bc4ee 100644
--- a/chromium/v8/src/regexp/regexp.cc
+++ b/chromium/v8/src/regexp/regexp.cc
@@ -37,7 +37,7 @@ class RegExpImpl final : public AllStatic {
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count, uint32_t backtrack_limit);
// Prepare a RegExp for being executed one or more times (using
@@ -51,7 +51,7 @@ class RegExpImpl final : public AllStatic {
Handle<String> subject);
static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern);
static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -90,7 +90,7 @@ class RegExpImpl final : public AllStatic {
// Returns true on success, false on failure.
static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit);
@@ -102,6 +102,32 @@ class RegExpImpl final : public AllStatic {
static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
};
+// static
+bool RegExp::CanGenerateBytecode() {
+ return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
+}
+
+// static
+template <class CharT>
+bool RegExp::VerifySyntax(Zone* zone, uintptr_t stack_limit, const CharT* input,
+ int input_length, RegExpFlags flags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc) {
+ RegExpCompileData data;
+ bool pattern_is_valid = RegExpParser::VerifyRegExpSyntax(
+ zone, stack_limit, input, input_length, flags, &data, no_gc);
+ *regexp_error_out = data.error;
+ return pattern_is_valid;
+}
+
+template bool RegExp::VerifySyntax<uint8_t>(Zone*, uintptr_t, const uint8_t*,
+ int, RegExpFlags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection&);
+template bool RegExp::VerifySyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags,
+ RegExpError* regexp_error_out, const DisallowGarbageCollection&);
+
MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
Handle<JSRegExp> re,
Handle<String> pattern,
@@ -120,7 +146,7 @@ MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
void RegExp::ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
RegExpError error_text) {
- USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
+ USE(ThrowRegExpException(isolate, re, Handle<String>(re->source(), isolate),
error_text));
}
@@ -154,8 +180,7 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
// static
MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
uint32_t backtrack_limit) {
DCHECK(pattern->IsFlat());
@@ -169,8 +194,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
CompilationCache* compilation_cache = nullptr;
if (is_compilation_cache_enabled) {
compilation_cache = isolate->compilation_cache();
- MaybeHandle<FixedArray> maybe_cached =
- compilation_cache->LookupRegExp(pattern, flags);
+ MaybeHandle<FixedArray> maybe_cached = compilation_cache->LookupRegExp(
+ pattern, JSRegExp::AsJSRegExpFlags(flags));
Handle<FixedArray> cached;
if (maybe_cached.ToHandle(&cached)) {
re->set_data(*cached);
@@ -180,10 +205,9 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
DCHECK(!isolate->has_pending_exception());
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &parse_result)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return RegExp::ThrowRegExpException(isolate, re, pattern,
parse_result.error);
@@ -210,7 +234,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
parse_result.capture_count);
has_been_compiled = true;
- } else if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
+ } else if (parse_result.simple && !IsIgnoreCase(flags) && !IsSticky(flags) &&
!HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
RegExpImpl::AtomCompile(isolate, re, pattern, flags, pattern);
@@ -225,7 +249,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, atom_string,
isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
+ if (!IsIgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -239,7 +263,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
if (is_compilation_cache_enabled) {
- compilation_cache->PutRegExp(pattern, flags, data);
+ compilation_cache->PutRegExp(pattern, JSRegExp::AsJSRegExpFlags(flags),
+ data);
}
return re;
@@ -248,7 +273,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
// static
bool RegExp::EnsureFullyCompiled(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> subject) {
- switch (re->TypeTag()) {
+ switch (re->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
@@ -283,7 +308,7 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info,
ExecQuirks exec_quirks) {
- switch (regexp->TypeTag()) {
+ switch (regexp->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
@@ -301,9 +326,10 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
// RegExp Atom implementation: Simple string search using indexOf.
void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern) {
- isolate->factory()->SetRegExpAtomData(re, pattern, flags, match_pattern);
+ isolate->factory()->SetRegExpAtomData(
+ re, pattern, JSRegExp::AsJSRegExpFlags(flags), match_pattern);
}
static void SetAtomLastCapture(Isolate* isolate,
@@ -326,7 +352,7 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
subject = String::Flatten(isolate, subject);
DisallowGarbageCollection no_gc; // ensure vectors stay valid
- String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ String needle = regexp->atom_pattern();
int needle_len = needle.length();
DCHECK(needle.IsFlat());
DCHECK_LT(0, needle_len);
@@ -394,8 +420,8 @@ Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
- Object compiled_code = re->Code(is_one_byte);
- Object bytecode = re->Bytecode(is_one_byte);
+ Object compiled_code = re->code(is_one_byte);
+ Object bytecode = re->bytecode(is_one_byte);
bool needs_initial_compilation =
compiled_code == Smi::FromInt(JSRegExp::kUninitializedValue);
// Recompile is needed when we're dealing with the first execution of the
@@ -420,12 +446,12 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
}
-#ifdef DEBUG
namespace {
+#ifdef DEBUG
bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
- Object entry = re->Code(is_one_byte);
- Object bytecode = re->Bytecode(is_one_byte);
+ Object entry = re->code(is_one_byte);
+ Object bytecode = re->bytecode(is_one_byte);
// If we're not using the tier-up strategy, entry can only be a smi
// representing an uncompiled regexp here. If we're using the tier-up
// strategy, entry can still be a smi representing an uncompiled regexp, when
@@ -448,9 +474,50 @@ bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
return true;
}
+#endif
+
+struct RegExpCaptureIndexLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return lhs->index() < rhs->index();
+ }
+};
} // namespace
-#endif
+
+// static
+Handle<FixedArray> RegExp::CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures) {
+ if (named_captures == nullptr) return Handle<FixedArray>();
+
+ DCHECK(!named_captures->empty());
+
+ // Named captures are sorted by name (because the set is used to ensure
+  // name uniqueness). But the capture name map must be sorted by index.
+
+ std::sort(named_captures->begin(), named_captures->end(),
+ RegExpCaptureIndexLess{});
+
+ int len = static_cast<int>(named_captures->size()) * 2;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(len);
+
+ int i = 0;
+ for (const RegExpCapture* capture : *named_captures) {
+ base::Vector<const base::uc16> capture_name(capture->name()->data(),
+ capture->name()->size());
+ // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
+ // internalized so they can be used as property names in the 'exec' results.
+ Handle<String> name = isolate->factory()->InternalizeString(capture_name);
+ array->set(i * 2, *name);
+ array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+
+ i++;
+ }
+ DCHECK_EQ(i * 2, len);
+
+ return array;
+}
bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
@@ -461,14 +528,13 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
DCHECK(RegExpCodeIsValidForPreCompilation(re, is_one_byte));
- JSRegExp::Flags flags = re->GetFlags();
+ RegExpFlags flags = JSRegExp::AsRegExpFlags(re->flags());
- Handle<String> pattern(re->Pattern(), isolate);
+ Handle<String> pattern(re->source(), isolate);
pattern = String::Flatten(isolate, pattern);
RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &compile_data)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(RegExp::ThrowRegExpException(isolate, re, pattern, compile_data.error));
@@ -482,7 +548,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
compile_data.compilation_target = re->ShouldProduceBytecode()
? RegExpCompilationTarget::kBytecode
: RegExpCompilationTarget::kNative;
- uint32_t backtrack_limit = re->BacktrackLimit();
+ uint32_t backtrack_limit = re->backtrack_limit();
const bool compilation_succeeded =
Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
is_one_byte, backtrack_limit);
@@ -513,7 +579,9 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
data->set(JSRegExp::code_index(is_one_byte), ToCodeT(*trampoline));
}
- re->SetCaptureNameMap(compile_data.capture_name_map);
+ Handle<FixedArray> capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, compile_data.named_captures);
+ re->set_capture_name_map(capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
@@ -553,12 +621,13 @@ Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
}
void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags, int capture_count,
+ Handle<String> pattern, RegExpFlags flags,
+ int capture_count,
uint32_t backtrack_limit) {
// Initialize compiled code entries to null.
- isolate->factory()->SetRegExpIrregexpData(re, pattern, flags, capture_count,
- backtrack_limit);
+ isolate->factory()->SetRegExpIrregexpData(re, pattern,
+ JSRegExp::AsJSRegExpFlags(flags),
+ capture_count, backtrack_limit);
}
// static
@@ -575,7 +644,7 @@ int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
// Only reserve room for output captures. Internal registers are allocated by
// the engine.
- return JSRegExp::RegistersForCaptureCount(regexp->CaptureCount());
+ return JSRegExp::RegistersForCaptureCount(regexp->capture_count());
}
int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -585,7 +654,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
DCHECK_LE(index, subject->length());
DCHECK(subject->IsFlat());
DCHECK_GE(output_size,
- JSRegExp::RegistersForCaptureCount(regexp->CaptureCount()));
+ JSRegExp::RegistersForCaptureCount(regexp->capture_count()));
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
@@ -652,14 +721,13 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int previous_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(regexp->type_tag(), JSRegExp::IRREGEXP);
subject = String::Flatten(isolate, subject);
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes && regexp->ShouldProduceBytecode()) {
- String pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
+ PrintF("\n\nRegexp match: /%s/\n\n", regexp->source().ToCString().get());
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
@@ -706,7 +774,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
return isolate->factory()->null_value();
}
}
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
} else if (res == RegExp::RE_FALLBACK_TO_EXPERIMENTAL) {
@@ -783,7 +851,7 @@ bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
// static
bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpCompileData* data, RegExpFlags flags,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte) {
@@ -793,7 +861,7 @@ bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
}
bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit) {
if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
@@ -868,6 +936,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_RISCV64
macro_assembler.reset(new RegExpMacroAssemblerRISCV(isolate, zone, mode,
output_register_count));
+#elif V8_TARGET_ARCH_LOONG64
+ macro_assembler.reset(new RegExpMacroAssemblerLOONG64(
+ isolate, zone, mode, output_register_count));
#else
#error "Unsupported architecture"
#endif
@@ -970,9 +1041,9 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
regexp_(regexp),
subject_(subject),
isolate_(isolate) {
- DCHECK(IsGlobal(regexp->GetFlags()));
+ DCHECK(IsGlobal(JSRegExp::AsRegExpFlags(regexp->flags())));
- switch (regexp_->TypeTag()) {
+ switch (regexp_->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM: {
@@ -1009,7 +1080,7 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
return;
}
registers_per_match_ =
- JSRegExp::RegistersForCaptureCount(regexp->CaptureCount());
+ JSRegExp::RegistersForCaptureCount(regexp->capture_count());
register_array_size_ = std::max(
{registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize});
break;
@@ -1045,7 +1116,8 @@ RegExpGlobalCache::~RegExpGlobalCache() {
}
int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
+ if (IsUnicode(JSRegExp::AsRegExpFlags(regexp_->flags())) &&
+ last_index + 1 < subject_->length() &&
unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
// Advance over the surrogate pair.
@@ -1069,7 +1141,7 @@ int32_t* RegExpGlobalCache::FetchNext() {
&register_array_[(current_match_index_ - 1) * registers_per_match_];
int last_end_index = last_match[1];
- switch (regexp_->TypeTag()) {
+ switch (regexp_->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
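The new RegExp::CreateCaptureNameMap above re-sorts the parser's name-ordered captures by capture index and flattens them into a [name, index, name, index, ...] array. Below is a small self-contained sketch of the same transformation; NamedCapture and BuildCaptureNameMap are hypothetical names, and std::pair stands in for the flattened FixedArray.

#include <algorithm>
#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for RegExpCapture (not V8 API): a named capture
// group together with its capture index.
struct NamedCapture {
  std::string name;
  int index;
};

// Captures arrive sorted by name (the parser keys a set on the name to
// enforce uniqueness), but the map consumed at match time must be ordered
// by capture index.
std::vector<std::pair<std::string, int>> BuildCaptureNameMap(
    std::vector<NamedCapture> named_captures) {
  std::sort(named_captures.begin(), named_captures.end(),
            [](const NamedCapture& lhs, const NamedCapture& rhs) {
              return lhs.index < rhs.index;
            });
  std::vector<std::pair<std::string, int>> map;
  map.reserve(named_captures.size());
  for (const NamedCapture& capture : named_captures) {
    map.emplace_back(capture.name, capture.index);
  }
  return map;
}

int main() {
  // Names arrive alphabetically, as the parser would hand them over.
  std::vector<NamedCapture> captures = {{"day", 3}, {"month", 2}, {"year", 1}};
  auto map = BuildCaptureNameMap(std::move(captures));
  assert(map[0].first == "year" && map[0].second == 1);
  assert(map[2].first == "day" && map[2].second == 3);
  return 0;
}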
diff --git a/chromium/v8/src/regexp/regexp.h b/chromium/v8/src/regexp/regexp.h
index 40fe832fd7d..60a240f2597 100644
--- a/chromium/v8/src/regexp/regexp.h
+++ b/chromium/v8/src/regexp/regexp.h
@@ -5,12 +5,18 @@
#ifndef V8_REGEXP_REGEXP_H_
#define V8_REGEXP_REGEXP_H_
-#include "src/objects/js-regexp.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
#include "src/regexp/regexp-error.h"
+#include "src/regexp/regexp-flags.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+class JSRegExp;
+class RegExpCapture;
+class RegExpMatchInfo;
class RegExpNode;
class RegExpTree;
@@ -37,9 +43,9 @@ struct RegExpCompileData {
// True, iff the pattern is anchored at the start of the string with '^'.
bool contains_anchor = false;
- // Only use if the pattern contains named captures. If so, this contains a
- // mapping of capture names to capture indices.
- Handle<FixedArray> capture_name_map;
+ // Only set if the pattern contains named captures.
+ // Note: the lifetime equals that of the parse/compile zone.
+ ZoneVector<RegExpCapture*>* named_captures = nullptr;
// The error message. Only used if an error occurred during parsing or
// compilation.
@@ -62,9 +68,15 @@ struct RegExpCompileData {
class RegExp final : public AllStatic {
public:
// Whether the irregexp engine generates interpreter bytecode.
- static bool CanGenerateBytecode() {
- return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
- }
+ static bool CanGenerateBytecode();
+
+ // Verify the given pattern, i.e. check that parsing succeeds. If
+ // verification fails, `regexp_error_out` is set.
+ template <class CharT>
+ static bool VerifySyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc);
// Parses the RegExp pattern and prepares the JSRegExp object with
// generic data and choice of implementation - as well as what
@@ -72,7 +84,7 @@ class RegExp final : public AllStatic {
// Returns false if compilation fails.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- JSRegExp::Flags flags, uint32_t backtrack_limit);
+ RegExpFlags flags, uint32_t backtrack_limit);
// Ensures that a regexp is fully compiled and ready to be executed on a
// subject string. Returns true on success. Returns false on failure, and
@@ -131,12 +143,9 @@ class RegExp final : public AllStatic {
Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
Handle<String> subject, int capture_count, int32_t* match);
- V8_EXPORT_PRIVATE static bool CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* input,
- JSRegExp::Flags flags,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_one_byte);
+ V8_EXPORT_PRIVATE static bool CompileForTesting(
+ Isolate* isolate, Zone* zone, RegExpCompileData* input, RegExpFlags flags,
+ Handle<String> pattern, Handle<String> sample_subject, bool is_one_byte);
V8_EXPORT_PRIVATE static void DotPrintForTesting(const char* label,
RegExpNode* node);
@@ -152,6 +161,9 @@ class RegExp final : public AllStatic {
RegExpError error_text);
static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp);
+
+ static Handle<FixedArray> CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures);
};
// Uses a special global mode of irregexp-generated code to perform a global
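The new RegExp::VerifySyntax above follows a common header-hygiene pattern: the template is declared in regexp.h, defined once in regexp.cc, and explicitly instantiated there for the only two character types callers need (uint8_t and base::uc16). A hedged sketch of that pattern with hypothetical names (AllBelowLimit is not a V8 function):

#include <cstddef>
#include <cstdint>

// checker.h (hypothetical): only the declaration is visible to callers.
template <class CharT>
bool AllBelowLimit(const CharT* input, size_t length, CharT limit);

// checker.cc (hypothetical): the definition lives in one translation unit...
template <class CharT>
bool AllBelowLimit(const CharT* input, size_t length, CharT limit) {
  for (size_t i = 0; i < length; ++i) {
    if (input[i] >= limit) return false;
  }
  return true;
}

// ...and explicit instantiations emit exactly the specializations callers are
// allowed to use, mirroring the uint8_t / base::uc16 instantiations above.
template bool AllBelowLimit<uint8_t>(const uint8_t*, size_t, uint8_t);
template bool AllBelowLimit<uint16_t>(const uint16_t*, size_t, uint16_t);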
diff --git a/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc b/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
index 3269779efa2..bb15bc24edd 100644
--- a/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
+++ b/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
@@ -40,17 +40,16 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
* kStackFrameHeader
* --- sp when called ---
* - fp[72] ra Return from RegExp code (ra). kReturnAddress
* - fp[64] s9, old-fp Old fp, callee saved(s9).
* - fp[0..63] fp..s7 Callee-saved registers fp..s7.
* --- frame pointer ----
- * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
- * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-8] Isolate* isolate (address of the current isolate) kIsolate
+ * - fp[-16] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-24] output_size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* output (int[num_saved_registers_], for output). kRegisterOutput
* - fp[-40] end of input (address of end of string). kInputEnd
* - fp[-48] start of input (address of first character in string). kInputStart
* - fp[-56] start index (character index of start). kStartIndex
@@ -77,11 +76,11 @@ namespace internal {
* int start_index,
* Address start,
* Address end,
- * int* capture_output_array,
- * int num_capture_registers,
- * byte* stack_area_base,
+ * int* output,
+ * int output_size,
* bool direct_call = false,
- * Isolate* isolate);
+ * Isolate* isolate,
+ * Address regexp);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*
@@ -96,8 +95,10 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -107,8 +108,6 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -120,7 +119,6 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
}
RegExpMacroAssemblerRISCV::~RegExpMacroAssemblerRISCV() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -335,7 +333,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -593,6 +591,43 @@ void RegExpMacroAssemblerRISCV::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerRISCV::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Ld(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerRISCV::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sd(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerRISCV::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Sub64(scratch2, scratch1, scratch2);
+ __ Sd(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerRISCV::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, ref);
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Add64(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
+
Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
Label return_a0;
if (masm_->has_exception()) {
@@ -609,7 +644,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
// no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -628,14 +663,14 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// entry as cast to a function with the signature:
//
// *int(*match)(String input_string, // a0
- // int start_index, // a1
- // Address start, // a2
- // Address end, // a3
- // int*capture_output_array, // a4
- // int num_capture_registers, // a5
- // byte* stack_area_base, // a6
- // bool direct_call = false, // a7
- // Isolate * isolate); // on the stack
+ // int start_offset, // a1
+ // byte* input_start, // a2
+ // byte* input_end, // a3
+ // int* output, // a4
+ // int output_size, // a5
+ // int call_origin, // a6
+ // Isolate* isolate, // a7
+ // Address regexp); // on the stack
RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit() |
a4.bit() | a5.bit() | a6.bit() | a7.bit();
@@ -656,6 +691,12 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -737,7 +778,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -838,6 +879,9 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
}
__ bind(&return_a0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a1, a2);
// Skip sp past regexp registers and local variables.
__ mv(sp, frame_pointer());
@@ -857,6 +901,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() |
@@ -867,7 +912,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_a0, ne, a0, Operand(zero_reg));
-
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -877,25 +922,18 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers =
- current_input_offset().bit() | current_character().bit();
- __ MultiPush(regexp_registers);
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mv(a0, backtrack_stackpointer());
- __ Add64(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate).
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(),
+ a1);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, 0, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate()));
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ BranchShort(&exit_with_exception, eq, a0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mv(backtrack_stackpointer(), a0);
@@ -976,7 +1014,7 @@ void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ BranchShort(&after_constant);
int offset = masm_->pc_offset();
@@ -1010,10 +1048,22 @@ void RegExpMacroAssemblerRISCV::ReadCurrentPositionFromRegister(int reg) {
__ Ld(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerRISCV::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, ref);
+ __ Ld(a0, MemOperand(a0));
+ __ Sub64(a0, backtrack_stackpointer(), a0);
+ __ Sw(a0, register_location(reg));
+}
+
void RegExpMacroAssemblerRISCV::ReadStackPointerFromRegister(int reg) {
- __ Ld(backtrack_stackpointer(), register_location(reg));
- __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
- __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a1, ref);
+ __ Ld(a1, MemOperand(a1));
+ __ Lw(backtrack_stackpointer(), register_location(reg));
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), a1);
}
void RegExpMacroAssemblerRISCV::SetCurrentPositionFromEnd(int by) {
@@ -1057,11 +1107,6 @@ void RegExpMacroAssemblerRISCV::ClearRegisters(int reg_from, int reg_to) {
}
}
-void RegExpMacroAssemblerRISCV::WriteStackPointerToRegister(int reg) {
- __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Sub64(a0, backtrack_stackpointer(), a1);
- __ Sd(a0, register_location(reg));
-}
bool RegExpMacroAssemblerRISCV::CanReadUnaligned() { return false; }
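The GrowStack and preempt-check changes above switch to a protocol where the generated code writes the live backtrack stack pointer to a per-thread slot before calling into C++ (which may reallocate and move the stack) and re-reads it afterwards. A simplified, self-contained sketch of that protocol follows, under assumed names; ThreadSlot, BacktrackStack and GrowStackSketch are illustrative, not V8 API.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for the per-thread regexp_stack_stack_pointer slot.
struct ThreadSlot {
  uint8_t* stack_pointer = nullptr;
};

// Stand-in for the growable backtrack stack; live data sits at the buffer top.
struct BacktrackStack {
  std::vector<uint8_t> buffer = std::vector<uint8_t>(64);
  uint8_t* top() { return buffer.data() + buffer.size(); }
};

// C++ side of the protocol: read the pointer the generated code stored, grow
// the buffer, translate the pointer into the new allocation, write it back
// and return it (nullptr would signal a stack-overflow failure).
uint8_t* GrowStackSketch(ThreadSlot* slot, BacktrackStack* stack) {
  const ptrdiff_t delta = slot->stack_pointer - stack->top();
  std::vector<uint8_t> grown(stack->buffer.size() * 2);
  std::copy(stack->buffer.begin(), stack->buffer.end(),
            grown.end() - stack->buffer.size());
  stack->buffer = std::move(grown);
  slot->stack_pointer = stack->top() + delta;
  return slot->stack_pointer;
}

int main() {
  ThreadSlot slot;
  BacktrackStack stack;
  uint8_t* sp = stack.top();      // The "register" copy held by generated code.
  sp -= 8;                        // Simulate a few backtrack pushes.
  slot.stack_pointer = sp;        // Store before the C++ call...
  uint8_t* new_sp = GrowStackSketch(&slot, &stack);
  assert(new_sp != nullptr);
  sp = slot.stack_pointer;        // ...and reload afterwards.
  assert(sp == stack.top() - 8);  // Same logical position in the new buffer.
  return 0;
}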
diff --git a/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h b/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
index a5d5bb529ed..211f17b314b 100644
--- a/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
+++ b/chromium/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
@@ -105,14 +105,11 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
- // Stack parameters placed by caller.
- static const int kIsolate = kStackFrameHeader + kSystemPointerSize;
-
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -124,8 +121,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+ static constexpr int kNumberOfStackLocals = 4;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -144,27 +147,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return a7; }
+ static constexpr Register current_character() { return a7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -186,19 +189,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
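The header above inserts the new kRegExpStackBasePointer slot into the chain of frame-slot offsets, and the STATIC_ASSERTs in GetCode() keep the push order in sync with it. Below is a compilable sketch of the same pattern using a simplified, hypothetical layout (not the actual RISC-V frame):

namespace frame_layout_sketch {

constexpr int kSystemPointerSize = 8;

// Slots below the frame pointer, defined as a chain of descending offsets so
// that inserting a slot only touches one line.
constexpr int kFramePointer = 0;
constexpr int kIsolate = kFramePointer - kSystemPointerSize;
constexpr int kDirectCall = kIsolate - kSystemPointerSize;
constexpr int kBacktrackCount = kDirectCall - kSystemPointerSize;
// Newly inserted slot: the position-independent regexp stack base pointer.
constexpr int kRegExpStackBasePointer = kBacktrackCount - kSystemPointerSize;
// The register spill area starts below all named locals.
constexpr int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;

// Analogues of the STATIC_ASSERTs guarding the push order in GetCode().
static_assert(kRegExpStackBasePointer == kBacktrackCount - kSystemPointerSize,
              "base pointer slot sits directly below the backtrack count");
static_assert(kRegisterZero < kRegExpStackBasePointer,
              "registers live below every named stack local");

}  // namespace frame_layout_sketch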
diff --git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 83092e53365..6945aa3f6ee 100644
--- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -6,15 +6,14 @@
#if V8_TARGET_ARCH_S390
-#include "src/base/bits.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -102,8 +101,10 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -113,8 +114,6 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ b(&entry_label_); // We'll write the entry code later.
@@ -127,7 +126,6 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -353,7 +351,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -629,6 +627,43 @@ void RegExpMacroAssemblerS390::Fail() {
__ b(&exit_label_);
}
+void RegExpMacroAssemblerS390::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ LoadU64(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerS390::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ StoreU64(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerS390::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ SubS64(scratch, stack_pointer, scratch);
+ __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerS390::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ LoadU64(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ AddS64(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
+
Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
Label return_r2;
@@ -640,7 +675,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type
// is MANUAL, no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Ensure register assignments are consistent with callee save mask
DCHECK(r6.bit() & kRegExpCalleeSaved);
@@ -689,33 +724,47 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ StoreMultipleP(r0, r9, MemOperand(sp, 0));
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ Push(r1); // The backtrack counter.
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r1); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r13);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r3);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r2, Operand(stack_limit));
+ __ LoadU64(r2, MemOperand(r2));
+ __ SubS64(r2, sp, r2);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ CmpU64(r2, Operand(num_registers_ * kSystemPointerSize));
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r2, Operand(EXCEPTION));
+ __ b(&return_r2);
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r2, Operand(stack_limit));
- __ LoadU64(r2, MemOperand(r2));
- __ SubS64(r2, sp, r2);
- // Handle it if the stack pointer is already below the stack limit.
- __ ble(&stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ CmpU64(r2, Operand(num_registers_ * kSystemPointerSize));
- __ bge(&stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r2, Operand(EXCEPTION));
- __ b(&return_r2);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r2);
- __ CmpS64(r2, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ bne(&return_r2);
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r2);
+ __ CmpS64(r2, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ bne(&return_r2);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize)));
@@ -743,18 +792,21 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ CmpS64(r3, Operand::Zero());
- __ bne(&load_char_start_regexp);
- __ mov(current_character(), Operand('\n'));
- __ b(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ CmpS64(r3, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -776,10 +828,6 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ LoadU64(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
-
__ b(&start_label_);
// Exit code:
@@ -872,6 +920,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Prepare r2 to initialize registers with its value in the next run.
__ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r4);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r6: capture start index
@@ -901,6 +953,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
__ bind(&return_r2);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r4);
+
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r6..r15.
@@ -920,12 +976,16 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r3);
+
CallCheckStackGuardState(r2);
__ CmpS64(r2, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ bne(&return_r2);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
@@ -936,16 +996,17 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r2);
- __ mov(r2, backtrack_stackpointer());
- __ AddS64(r3, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r3);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, r2);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ CmpS64(r2, Operand::Zero());
__ beq(&exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1041,10 +1102,22 @@ void RegExpMacroAssemblerS390::ReadCurrentPositionFromRegister(int reg) {
__ LoadU64(current_input_offset(), register_location(reg), r0);
}
+void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r3, Operand(ref));
+ __ LoadU64(r3, MemOperand(r3));
+ __ SubS64(r2, backtrack_stackpointer(), r3);
+ __ StoreU64(r2, register_location(reg));
+}
+
void RegExpMacroAssemblerS390::ReadStackPointerFromRegister(int reg) {
- __ LoadU64(backtrack_stackpointer(), register_location(reg), r0);
- __ LoadU64(r2, MemOperand(frame_pointer(), kStackHighEnd));
- __ AddS64(backtrack_stackpointer(), r2);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r2, Operand(ref));
+ __ LoadU64(r2, MemOperand(r2));
+ __ LoadU64(backtrack_stackpointer(), register_location(reg));
+ __ AddS64(backtrack_stackpointer(), backtrack_stackpointer(), r2);
}
void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
@@ -1088,12 +1161,6 @@ void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
}
}
-void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
- __ LoadU64(r3, MemOperand(frame_pointer(), kStackHighEnd));
- __ SubS64(r2, backtrack_stackpointer(), r3);
- __ StoreU64(r2, register_location(reg));
-}
-
// Private methods:
void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
diff --git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 8e8601fc7c3..458eec2c8e9 100644
--- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
#define V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/s390/assembler-s390.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -90,21 +88,15 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 6-15(sp)
static const int kStoredRegisters = kFramePointer;
static const int kCallerFrame =
kStoredRegisters + kCalleeRegisterSaveAreaSize;
- // Stack parameters placed by caller.
- static const int kCaptureArraySize = kCallerFrame;
- static const int kStackAreaBase = kCallerFrame + kSystemPointerSize;
- // kDirectCall again
- static const int kIsolate = kStackAreaBase + 2 * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -116,8 +108,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -137,27 +135,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r8; }
+ static constexpr Register current_input_offset() { return r8; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r9; }
+ static constexpr Register current_character() { return r9; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
+ static constexpr Register end_of_input_address() { return r10; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r13; }
+ static constexpr Register backtrack_stackpointer() { return r13; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r7; }
+ static constexpr Register code_pointer() { return r7; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -177,19 +175,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 6f0cb53e8f5..c2185dbcc55 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,13 +6,13 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#include "src/codegen/code-desc.h"
#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -47,14 +47,12 @@ namespace internal {
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kStackHighEnd):
+ * frame pointer (see, e.g., kDirectCall):
* - Address regexp (address of the JSRegExp object; unused in native
* code, passed to match signature of interpreter)
* - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (address of end of string)
@@ -85,7 +83,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -664,31 +661,64 @@ void RegExpMacroAssemblerX64::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerX64::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ movq(dst, __ ExternalReferenceAsOperand(ref, dst));
+}
+
+void RegExpMacroAssemblerX64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ movq(__ ExternalReferenceAsOperand(ref, scratch), src);
+}
+
+void RegExpMacroAssemblerX64::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(scratch, __ ExternalReferenceAsOperand(ref, scratch));
+ __ subq(scratch, stack_pointer);
+ __ movq(Operand(rbp, kRegExpStackBasePointer), scratch);
+}
+
+void RegExpMacroAssemblerX64::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(scratch, Operand(rbp, kRegExpStackBasePointer));
+ __ movq(stack_pointer_out,
+ __ ExternalReferenceAsOperand(ref, stack_pointer_out));
+ __ subq(stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label return_rax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
- // Entry code:
+ // Finalize code - write the entry point code now we know how many registers
+ // we need.
__ bind(&entry_label_);
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // is generated.
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // physical frame is generated.
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
__ pushq(rbp);
__ movq(rbp, rsp);
+
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef V8_TARGET_OS_WIN
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
- // Store register parameters in pre-allocated stack slots,
- __ movq(Operand(rbp, kInputString), rcx);
- __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), r8);
- __ movq(Operand(rbp, kInputEnd), r9);
- // Callee-save on Win64.
+ // Store register parameters in pre-allocated stack slots.
+ __ movq(Operand(rbp, kInputString), arg_reg_1);
+ __ movq(Operand(rbp, kStartIndex), arg_reg_2); // Passed as int32 in edx.
+ __ movq(Operand(rbp, kInputStart), arg_reg_3);
+ __ movq(Operand(rbp, kInputEnd), arg_reg_4);
+
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 3);
__ pushq(rsi);
__ pushq(rdi);
__ pushq(rbx);
@@ -701,14 +731,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
DCHECK_EQ(kInputEnd, -4 * kSystemPointerSize);
DCHECK_EQ(kRegisterOutput, -5 * kSystemPointerSize);
DCHECK_EQ(kNumOutputRegisters, -6 * kSystemPointerSize);
- __ pushq(rdi);
- __ pushq(rsi);
- __ pushq(rdx);
- __ pushq(rcx);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
+ __ pushq(arg_reg_3);
+ __ pushq(arg_reg_4);
__ pushq(r8);
__ pushq(r9);
- __ pushq(rbx); // Callee-save
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 1);
+ __ pushq(rbx);
#endif
STATIC_ASSERT(kSuccessfulCaptures ==
@@ -719,35 +750,50 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Push(Immediate(0)); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ Push(Immediate(0)); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ Push(Immediate(0)); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is *not* callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == rcx);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ movq(r9, rsp);
+ __ Move(kScratchRegister, stack_limit);
+ __ subq(r9, Operand(kScratchRegister, 0));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmpq(r9, Immediate(num_registers_ * kSystemPointerSize));
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Move(rax, EXCEPTION);
+ __ jmp(&return_rax);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ movq(rcx, rsp);
- __ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kSystemPointerSize));
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Move(rax, EXCEPTION);
- __ jmp(&return_rax);
-
- __ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_.CodeObject());
- CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_rax);
+ __ bind(&stack_limit_hit);
+ __ Move(code_object_pointer(), masm_.CodeObject());
+ __ pushq(backtrack_stackpointer());
+ CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
+ __ popq(backtrack_stackpointer());
+ __ testq(rax, rax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_rax);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kSystemPointerSize);
@@ -773,18 +819,23 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ Move(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
+ Label load_char_start_regexp; // Execution restarts here for global regexps.
+ {
+ Label start_regexp;
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ Move(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) {
@@ -792,13 +843,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
- __ Move(rcx, kRegisterZero);
+ __ Move(r9, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kSystemPointerSize));
- __ cmpq(rcx, Immediate(kRegisterZero -
- num_saved_registers_ * kSystemPointerSize));
+ __ movq(Operand(rbp, r9, times_1, 0), rax);
+ __ subq(r9, Immediate(kSystemPointerSize));
+ __ cmpq(r9, Immediate(kRegisterZero -
+ num_saved_registers_ * kSystemPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
@@ -807,9 +858,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -861,6 +909,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Prepare rax to initialize registers with its value in the next run.
__ movq(rax, Operand(rbp, kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
@@ -894,19 +946,26 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
__ bind(&return_rax);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
#ifdef V8_TARGET_OS_WIN
// Restore callee save registers.
__ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 3);
__ popq(rbx);
__ popq(rdi);
__ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 1);
__ movq(rbx, Operand(rbp, kBackup_rbx));
// Skip rsp to rbp.
__ movq(rsp, rbp);
#endif
+
// Exit function frame, restore previous one.
__ popq(rbp);
__ ret(0);
@@ -923,9 +982,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ pushq(backtrack_stackpointer());
__ pushq(rdi);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), kScratchRegister);
+
CallCheckStackGuardState();
__ testq(rax, rax);
// If returning non-zero, we should end execution with the given
@@ -935,7 +995,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
__ popq(rdi);
- __ popq(backtrack_stackpointer());
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload esi from frame.
__ movq(rsi, Operand(rbp, kInputEnd));
SafeReturn();
@@ -953,25 +1015,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
#endif
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef V8_TARGET_OS_WIN
- // Microsoft passes parameters in rcx, rdx, r8.
- // First argument, backtrack stackpointer, is already in rcx.
- __ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
-#else
- // AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ leaq(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
-#endif
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), kScratchRegister);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1085,13 +1141,25 @@ void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
__ movq(dst, register_location(reg));
}
+// Preserves a position-independent representation of the stack pointer in reg:
+// reg = top - sp.
+void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(rax, __ ExternalReferenceAsOperand(stack_top_address, rax));
+ __ subq(rax, backtrack_stackpointer());
+ __ movq(register_location(reg), rax);
+}
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(backtrack_stackpointer(),
+ __ ExternalReferenceAsOperand(stack_top_address,
+ backtrack_stackpointer()));
+ __ subq(backtrack_stackpointer(), register_location(reg));
}
-
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
@@ -1136,14 +1204,6 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
-}
-
-
// Private methods:
void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index c3a3cb90f2a..69bb399c3e6 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/x64/assembler-x64.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/zone/zone-chunk-list.h"
@@ -110,9 +108,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
@@ -123,28 +120,26 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kInputStart = kStartIndex - kSystemPointerSize;
static const int kInputEnd = kInputStart - kSystemPointerSize;
static const int kRegisterOutput = kInputEnd - kSystemPointerSize;
-
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput - kSystemPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+
+ static const int kDirectCall = kFrameAlign;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#endif
+ // We push callee-save registers that we use after the frame pointer (and
+ // after the parameters).
#ifdef V8_TARGET_OS_WIN
- // Microsoft calling convention has three callee-saved registers
- // (that we are using). We push these after the frame pointer.
static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
static const int kBackup_rdi = kBackup_rsi - kSystemPointerSize;
static const int kBackup_rbx = kBackup_rdi - kSystemPointerSize;
+ static const int kNumCalleeSaveRegisters = 3;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
- // AMD64 Calling Convention has only one callee-save register that
- // we use. We push this after the frame pointer (and after the
- // parameters).
static const int kBackup_rbx = kNumOutputRegisters - kSystemPointerSize;
+ static const int kNumCalleeSaveRegisters = 1;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -155,9 +150,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -175,14 +175,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return rdx; }
+ static constexpr Register current_character() { return rdx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return rcx; }
+ static constexpr Register backtrack_stackpointer() { return rcx; }
// The registers containing a self pointer to this code's Code object.
- inline Register code_object_pointer() { return r8; }
+ static constexpr Register code_object_pointer() { return r8; }
// Byte size of chars in the string to match (decided by the Mode argument)
inline int char_size() { return static_cast<int>(mode_); }
@@ -224,24 +224,36 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch_pointer, Register scratch);
+ void PopRegExpBasePointer(Register scratch_pointer_out, Register scratch);
+
inline void ReadPositionFromRegister(Register dst, int reg);
Isolate* isolate() const { return masm_.isolate(); }
MacroAssembler masm_;
- NoRootArrayScope no_root_array_scope_;
+
+ // On x64, there is no reason to keep the kRootRegister uninitialized; we
+ // could easily use it by 1. initializing it and 2. storing/restoring it
+ // as callee-save on entry/exit.
+ // But: on other platforms, specifically ia32, it would be tricky to enable
+ // the kRootRegister since it's currently used for other purposes. Thus, for
+ // consistency, we also keep it uninitialized here.
+ const NoRootArrayScope no_root_array_scope_;
ZoneChunkList<int> code_relative_fixup_positions_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/chromium/v8/src/roots/DIR_METADATA b/chromium/v8/src/roots/DIR_METADATA
index ff55846b318..af999da1f2a 100644
--- a/chromium/v8/src/roots/DIR_METADATA
+++ b/chromium/v8/src/roots/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-}
\ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/chromium/v8/src/runtime/runtime-atomics.cc b/chromium/v8/src/runtime/runtime-atomics.cc
index 32a13531775..1fb80f780dd 100644
--- a/chromium/v8/src/runtime/runtime-atomics.cc
+++ b/chromium/v8/src/runtime/runtime-atomics.cc
@@ -20,7 +20,7 @@ namespace internal {
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_RISCV64
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
namespace {
@@ -606,6 +606,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
- // || V8_TARGET_ARCH_RISCV64
+ // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-classes.cc b/chromium/v8/src/runtime/runtime-classes.cc
index 1cf4f9f644d..b584a7de991 100644
--- a/chromium/v8/src/runtime/runtime-classes.cc
+++ b/chromium/v8/src/runtime/runtime-classes.cc
@@ -298,7 +298,8 @@ bool AddDescriptorsByTemplate(
int count = 0;
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descriptors_template->GetDetails(i);
- if (details.location() == kDescriptor && details.kind() == kData) {
+ if (details.location() == PropertyLocation::kDescriptor &&
+ details.kind() == kData) {
count++;
}
}
@@ -319,7 +320,7 @@ bool AddDescriptorsByTemplate(
Name name = descriptors_template->GetKey(i);
DCHECK(name.IsUniqueName());
PropertyDetails details = descriptors_template->GetDetails(i);
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
if (details.kind() == kData) {
if (value.IsSmi()) {
value = GetMethodWithSharedName(isolate, args, value);
@@ -344,11 +345,13 @@ bool AddDescriptorsByTemplate(
UNREACHABLE();
}
DCHECK(value.FitsRepresentation(details.representation()));
- if (details.location() == kDescriptor && details.kind() == kData) {
- details = PropertyDetails(details.kind(), details.attributes(), kField,
- PropertyConstness::kConst,
- details.representation(), field_index)
- .set_pointer(details.pointer());
+ if (details.location() == PropertyLocation::kDescriptor &&
+ details.kind() == kData) {
+ details =
+ PropertyDetails(details.kind(), details.attributes(),
+ PropertyLocation::kField, PropertyConstness::kConst,
+ details.representation(), field_index)
+ .set_pointer(details.pointer());
property_array->set(field_index, value);
field_index++;
@@ -626,7 +629,12 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<JSObject> prototype = CreateClassPrototype(isolate);
DCHECK_EQ(*constructor, args[ClassBoilerplate::kConstructorArgumentIndex]);
- args.set_at(ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
+ // Temporarily change ClassBoilerplate::kPrototypeArgumentIndex for the
+ // subsequent calls, but use a scope to make sure to change it back before
+ // returning, to not corrupt the caller's argument frame (in particular, for
+ // the interpreter, to not clobber the register frame).
+ RuntimeArguments::ChangeValueScope set_prototype_value_scope(
+ isolate, &args, ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
if (!InitClassConstructor(isolate, class_boilerplate, constructor_parent,
constructor, args) ||
diff --git a/chromium/v8/src/runtime/runtime-collections.cc b/chromium/v8/src/runtime/runtime-collections.cc
index e03a9c06fff..7a67c78db11 100644
--- a/chromium/v8/src/runtime/runtime-collections.cc
+++ b/chromium/v8/src/runtime/runtime-collections.cc
@@ -29,7 +29,9 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
OrderedHashSet::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Set")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
@@ -64,7 +66,9 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
OrderedHashMap::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Map")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/chromium/v8/src/runtime/runtime-compiler.cc b/chromium/v8/src/runtime/runtime-compiler.cc
index 7088e4074e4..54924e0f7bb 100644
--- a/chromium/v8/src/runtime/runtime-compiler.cc
+++ b/chromium/v8/src/runtime/runtime-compiler.cc
@@ -83,13 +83,13 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- DCHECK(sfi->HasBaselineData());
+ DCHECK(sfi->HasBaselineCode());
IsCompiledScope is_compiled_scope(*sfi, isolate);
DCHECK(!function->HasAvailableOptimizedCode());
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- Code baseline_code = sfi->baseline_data().baseline_code();
+ Code baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return baseline_code;
}
diff --git a/chromium/v8/src/runtime/runtime-debug.cc b/chromium/v8/src/runtime/runtime-debug.cc
index cb92eae13c6..588dce9222b 100644
--- a/chromium/v8/src/runtime/runtime-debug.cc
+++ b/chromium/v8/src/runtime/runtime-debug.cc
@@ -335,16 +335,15 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
"[[ArrayBufferByteLength]]"),
isolate->factory()->NewNumberFromSize(byte_length));
- // Use the backing store pointer as a unique ID
- base::EmbeddedVector<char, 32> buffer_data_vec;
- int len =
- SNPrintF(buffer_data_vec, V8PRIxPTR_FMT,
- reinterpret_cast<Address>(js_array_buffer->backing_store()));
+ auto backing_store = js_array_buffer->GetBackingStore();
+ Handle<Object> array_buffer_data =
+ backing_store
+ ? isolate->factory()->NewNumberFromUint(backing_store->id())
+ : isolate->factory()->null_value();
result = ArrayList::Add(
isolate, result,
isolate->factory()->NewStringFromAsciiChecked("[[ArrayBufferData]]"),
- isolate->factory()->InternalizeUtf8String(
- buffer_data_vec.SubVector(0, len)));
+ array_buffer_data);
Handle<Symbol> memory_symbol =
isolate->factory()->array_buffer_wasm_memory_symbol();
diff --git a/chromium/v8/src/runtime/runtime-generator.cc b/chromium/v8/src/runtime/runtime-generator.cc
index a67a6f09c69..f9e60c64b3b 100644
--- a/chromium/v8/src/runtime/runtime-generator.cc
+++ b/chromium/v8/src/runtime/runtime-generator.cc
@@ -54,8 +54,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared().HasBytecodeArray());
- int size = function->shared().internal_formal_parameter_count() +
- function->shared().GetBytecodeArray(isolate).register_count();
+ int size =
+ function->shared().internal_formal_parameter_count_without_receiver() +
+ function->shared().GetBytecodeArray(isolate).register_count();
Handle<FixedArray> parameters_and_registers =
isolate->factory()->NewFixedArray(size);
diff --git a/chromium/v8/src/runtime/runtime-internal.cc b/chromium/v8/src/runtime/runtime-internal.cc
index f9dce4d271c..d86fc236224 100644
--- a/chromium/v8/src/runtime/runtime-internal.cc
+++ b/chromium/v8/src/runtime/runtime-internal.cc
@@ -31,6 +31,12 @@
#include "src/strings/string-builder-inl.h"
#include "src/utils/ostreams.h"
+#if V8_ENABLE_WEBASSEMBLY
+// TODO(jkummerow): Drop this when the "SaveAndClearThreadInWasmFlag"
+// short-term mitigation is no longer needed.
+#include "src/trap-handler/trap-handler.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -418,6 +424,34 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
+#if V8_ENABLE_WEBASSEMBLY
+class SaveAndClearThreadInWasmFlag {
+ public:
+ SaveAndClearThreadInWasmFlag() {
+ if (trap_handler::IsTrapHandlerEnabled()) {
+ if (trap_handler::IsThreadInWasm()) {
+ thread_was_in_wasm_ = true;
+ trap_handler::ClearThreadInWasm();
+ }
+ }
+ }
+ ~SaveAndClearThreadInWasmFlag() {
+ if (thread_was_in_wasm_) {
+ trap_handler::SetThreadInWasm();
+ }
+ }
+
+ private:
+ bool thread_was_in_wasm_{false};
+};
+#else
+class SaveAndClearThreadInWasmFlag {};
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -434,6 +468,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
+#if V8_ENABLE_WEBASSEMBLY
+ // Short-term mitigation for crbug.com/1236668. When this is called from
+ // WasmGC code, clear the "thread in wasm" flag, which is important in case
+ // any GC needs to happen.
+ // TODO(jkummerow): Find a better fix, likely by replacing the global flag.
+ SaveAndClearThreadInWasmFlag clear_wasm_flag;
+#endif // V8_ENABLE_WEBASSEMBLY
+
// TODO(v8:9472): Until double-aligned allocation is fixed for new-space
// allocations, don't request it.
double_align = false;
diff --git a/chromium/v8/src/runtime/runtime-literals.cc b/chromium/v8/src/runtime/runtime-literals.cc
index 31e50fa3e8f..958bc2277f2 100644
--- a/chromium/v8/src/runtime/runtime-literals.cc
+++ b/chromium/v8/src/runtime/runtime-literals.cc
@@ -110,7 +110,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
copy->map(isolate).instance_descriptors(isolate), isolate);
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
FieldIndex index = FieldIndex::ForPropertyIndex(
copy->map(isolate), details.field_index(),
@@ -678,7 +678,8 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
Handle<String> source(String::cast(regexp_instance->source()), isolate);
Handle<RegExpBoilerplateDescription> boilerplate =
isolate->factory()->NewRegExpBoilerplateDescription(
- data, source, Smi::cast(regexp_instance->flags()));
+ data, source,
+ Smi::FromInt(static_cast<int>(regexp_instance->flags())));
vector->SynchronizedSet(literal_slot, *boilerplate);
DCHECK(HasBoilerplate(
diff --git a/chromium/v8/src/runtime/runtime-module.cc b/chromium/v8/src/runtime/runtime-module.cc
index 52fadb8c8c2..9adde80fd9f 100644
--- a/chromium/v8/src/runtime/runtime-module.cc
+++ b/chromium/v8/src/runtime/runtime-module.cc
@@ -12,6 +12,18 @@
namespace v8 {
namespace internal {
+namespace {
+Handle<Script> GetEvalOrigin(Isolate* isolate, Script origin_script) {
+ DisallowGarbageCollection no_gc;
+ while (origin_script.has_eval_from_shared()) {
+ HeapObject maybe_script = origin_script.eval_from_shared().script();
+ CHECK(maybe_script.IsScript());
+ origin_script = Script::cast(maybe_script);
+ }
+ return handle(origin_script, isolate);
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
@@ -25,17 +37,11 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
import_assertions = args.at<Object>(2);
}
- Handle<Script> script(Script::cast(function->shared().script()), isolate);
-
- while (script->has_eval_from_shared()) {
- Object maybe_script = script->eval_from_shared().script();
- CHECK(maybe_script.IsScript());
- script = handle(Script::cast(maybe_script), isolate);
- }
-
+ Handle<Script> referrer_script =
+ GetEvalOrigin(isolate, Script::cast(function->shared().script()));
RETURN_RESULT_OR_FAILURE(isolate,
isolate->RunHostImportModuleDynamicallyCallback(
- script, specifier, import_assertions));
+ referrer_script, specifier, import_assertions));
}
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
diff --git a/chromium/v8/src/runtime/runtime-object.cc b/chromium/v8/src/runtime/runtime-object.cc
index 42bbb10d92e..3da21358d80 100644
--- a/chromium/v8/src/runtime/runtime-object.cc
+++ b/chromium/v8/src/runtime/runtime-object.cc
@@ -49,22 +49,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(
if (!it.IsFound() && key->IsSymbol() &&
Symbol::cast(*key).is_private_name()) {
- Handle<Symbol> sym = Handle<Symbol>::cast(key);
- Handle<Object> name(sym->description(), isolate);
- DCHECK(name->IsString());
- Handle<String> name_string = Handle<String>::cast(name);
- if (sym->IsPrivateBrand()) {
- Handle<String> class_name = (name_string->length() == 0)
- ? isolate->factory()->anonymous_string()
- : name_string;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateBrand,
- class_name, lookup_start_object),
- Object);
- }
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
- name_string, lookup_start_object),
+ MessageTemplate message = Symbol::cast(*key).IsPrivateBrand()
+ ? MessageTemplate::kInvalidPrivateBrand
+ : MessageTemplate::kInvalidPrivateMemberRead;
+ THROW_NEW_ERROR(isolate, NewTypeError(message, key, lookup_start_object),
Object);
}
return result;
@@ -198,7 +186,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DisallowGarbageCollection no_gc;
// Invalidate slots manually later in case we delete an in-object tagged
@@ -1424,7 +1412,9 @@ RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, brand));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateBrandReinitialization,
+ brand));
}
PropertyAttributes attributes =
@@ -1447,7 +1437,8 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldReitialization, key));
}
CHECK(Object::AddDataProperty(&it, value, NONE, Just(kDontThrow),
diff --git a/chromium/v8/src/runtime/runtime-regexp.cc b/chromium/v8/src/runtime/runtime-regexp.cc
index c52449a642b..eb16e9c24f5 100644
--- a/chromium/v8/src/runtime/runtime-regexp.cc
+++ b/chromium/v8/src/runtime/runtime-regexp.cc
@@ -333,8 +333,8 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
FixedArray capture_name_map;
if (capture_count > 0) {
- DCHECK(JSRegExp::TypeSupportsCaptures(regexp->TypeTag()));
- Object maybe_capture_name_map = regexp->CaptureNameMap();
+ DCHECK(JSRegExp::TypeSupportsCaptures(regexp->type_tag()));
+ Object maybe_capture_name_map = regexp->capture_name_map();
if (maybe_capture_name_map.IsFixedArray()) {
capture_name_map = FixedArray::cast(maybe_capture_name_map);
}
@@ -550,9 +550,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
std::vector<int>* indices = GetRewoundRegexpIndicesList(isolate);
- DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
- String pattern =
- String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->type_tag());
+ String pattern = pattern_regexp->atom_pattern();
int subject_len = subject->length();
int pattern_len = pattern.length();
int replacement_len = replacement->length();
@@ -595,7 +594,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
// Copy non-matched subject content.
if (subject_pos < index) {
String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
- subject_pos, index);
+ subject_pos, index - subject_pos);
result_pos += index - subject_pos;
}
@@ -611,7 +610,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
// Add remaining subject content at the end.
if (subject_pos < subject_len) {
String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
- subject_pos, subject_len);
+ subject_pos, subject_len - subject_pos);
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
@@ -628,7 +627,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
// Ensure the RegExp is compiled so we can access the capture-name map.
@@ -641,7 +640,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
isolate, regexp, replacement, capture_count, subject_length);
// Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
+ if (regexp->type_tag() == JSRegExp::ATOM && simple_replace) {
if (subject->IsOneByteRepresentation() &&
replacement->IsOneByteRepresentation()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
@@ -706,7 +705,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
DCHECK(subject->IsFlat());
// Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM) {
+ if (regexp->type_tag() == JSRegExp::ATOM) {
Handle<String> empty_string = isolate->factory()->empty_string();
if (subject->IsOneByteRepresentation()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
@@ -728,7 +727,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
int start = current_match[0];
int end = current_match[1];
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
int new_length = subject_length - (end - start);
@@ -753,7 +752,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (prev < start) {
// Add substring subject[prev;start] to answer string.
String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
- start);
+ start - prev);
position += start - prev;
}
prev = end;
@@ -769,7 +768,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
- subject_length);
+ subject_length - prev);
position += subject_length - prev;
}
@@ -967,7 +966,7 @@ RUNTIME_FUNCTION(Runtime_RegExpBuildIndices) {
CONVERT_ARG_HANDLE_CHECKED(Object, maybe_names, 2);
#ifdef DEBUG
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- DCHECK(regexp->GetFlags() & JSRegExp::kHasIndices);
+ DCHECK(regexp->flags() & JSRegExp::kHasIndices);
#endif
return *JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names);
@@ -983,8 +982,8 @@ class MatchInfoBackedMatch : public String::Match {
: isolate_(isolate), match_info_(match_info) {
subject_ = String::Flatten(isolate, subject);
- if (JSRegExp::TypeSupportsCaptures(regexp->TypeTag())) {
- Object o = regexp->CaptureNameMap();
+ if (JSRegExp::TypeSupportsCaptures(regexp->type_tag())) {
+ Object o = regexp->capture_name_map();
has_named_captures_ = o.IsFixedArray();
if (has_named_captures_) {
capture_name_map_ = handle(FixedArray::cast(o), isolate);
@@ -1165,7 +1164,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<RegExpMatchInfo> last_match_array,
Handle<JSArray> result_array) {
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
- DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
+ DCHECK_NE(has_capture, regexp->capture_count() == 0);
DCHECK(subject->IsFlat());
// Force tier up to native code for global replaces. The global replace is
@@ -1173,7 +1172,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
- if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+ if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in SearchRegExpMultiple\n",
@@ -1181,7 +1180,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
}
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
static const int kMinLengthToCache = 0x1000;
@@ -1260,7 +1259,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// subject, i.e., 3 + capture count in total. If the RegExp contains
// named captures, they are also passed as the last argument.
- Handle<Object> maybe_capture_map(regexp->CaptureNameMap(), isolate);
+ Handle<Object> maybe_capture_map(regexp->capture_name_map(), isolate);
const bool has_named_captures = maybe_capture_map->IsFixedArray();
const int argc =
@@ -1350,7 +1349,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Factory* factory = isolate->factory();
- const int flags = regexp->GetFlags();
+ const int flags = regexp->flags();
const bool global = (flags & JSRegExp::kGlobal) != 0;
const bool sticky = (flags & JSRegExp::kSticky) != 0;
@@ -1422,7 +1421,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
- if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+ if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in RegExpReplace\n",
@@ -1472,10 +1471,10 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
CHECK(result_array->HasObjectElements());
subject = String::Flatten(isolate, subject);
- CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
+ CHECK(regexp->flags() & JSRegExp::kGlobal);
Object result;
- if (regexp->CaptureCount() == 0) {
+ if (regexp->capture_count() == 0) {
result = SearchRegExpMultiple<false>(isolate, subject, regexp,
last_match_info, result_array);
} else {
@@ -1499,7 +1498,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
Factory* factory = isolate->factory();
Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
- const int flags = regexp->GetFlags();
+ const int flags = regexp->flags();
DCHECK_EQ(flags & JSRegExp::kGlobal, 0);
// TODO(jgruber): This should be an easy port to CSA with massive payback.
@@ -1552,9 +1551,9 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
bool has_named_captures = false;
Handle<FixedArray> capture_map;
if (m > 1) {
- DCHECK(JSRegExp::TypeSupportsCaptures(regexp->TypeTag()));
+ DCHECK(JSRegExp::TypeSupportsCaptures(regexp->type_tag()));
- Object maybe_capture_map = regexp->CaptureNameMap();
+ Object maybe_capture_map = regexp->capture_name_map();
if (maybe_capture_map.IsFixedArray()) {
has_named_captures = true;
capture_map = handle(FixedArray::cast(maybe_capture_map), isolate);
@@ -2015,7 +2014,7 @@ RUNTIME_FUNCTION(Runtime_RegExpStringFromFlags) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.GetFlags());
+ Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.flags());
return *flags;
}
diff --git a/chromium/v8/src/runtime/runtime-scopes.cc b/chromium/v8/src/runtime/runtime-scopes.cc
index f49689c2920..8b65ffb7ccc 100644
--- a/chromium/v8/src/runtime/runtime-scopes.cc
+++ b/chromium/v8/src/runtime/runtime-scopes.cc
@@ -401,7 +401,8 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
isolate->factory()->NewArgumentsObject(callee, argument_count);
// Allocate the elements if needed.
- int parameter_count = callee->shared().internal_formal_parameter_count();
+ int parameter_count =
+ callee->shared().internal_formal_parameter_count_without_receiver();
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = std::min(argument_count, parameter_count);
@@ -526,7 +527,8 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
- int start_index = callee->shared().internal_formal_parameter_count();
+ int start_index =
+ callee->shared().internal_formal_parameter_count_without_receiver();
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
diff --git a/chromium/v8/src/runtime/runtime-test-wasm.cc b/chromium/v8/src/runtime/runtime-test-wasm.cc
index 8425b1fa189..b33cbeae39f 100644
--- a/chromium/v8/src/runtime/runtime-test-wasm.cc
+++ b/chromium/v8/src/runtime/runtime-test-wasm.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-wasm.h"
#include "src/base/memory.h"
#include "src/base/platform/mutex.h"
#include "src/execution/arguments-inl.h"
diff --git a/chromium/v8/src/runtime/runtime-test.cc b/chromium/v8/src/runtime/runtime-test.cc
index 69b0f6241bd..38cdf576714 100644
--- a/chromium/v8/src/runtime/runtime-test.cc
+++ b/chromium/v8/src/runtime/runtime-test.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/numbers/double.h"
#include "src/base/platform/mutex.h"
@@ -70,6 +71,18 @@ V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
if (!args[index].IsBoolean()) return CrashUnlessFuzzing(isolate); \
bool name = args[index].IsTrue(isolate);
+bool IsAsmWasmFunction(Isolate* isolate, JSFunction function) {
+ DisallowGarbageCollection no_gc;
+#if V8_ENABLE_WEBASSEMBLY
+ // For simplicity we include invalid asm.js functions whose code hasn't yet
+ // been updated to CompileLazy but is still the InstantiateAsmJs builtin.
+ return function.shared().HasAsmWasmData() ||
+ function.code().builtin_id() == Builtin::kInstantiateAsmJs;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
} // namespace
RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
@@ -242,11 +255,9 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) {
+ if (IsAsmWasmFunction(isolate, *function)) {
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#endif // V8_ENABLE_WEBASSEMBLY
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
@@ -362,12 +373,12 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
// First compile the bytecode, if we have to.
if (!is_compiled_scope.is_compiled() &&
- !Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
+ !Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
- if (!Compiler::CompileBaseline(isolate, function, Compiler::KEEP_EXCEPTION,
+ if (!Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
@@ -424,9 +435,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return CrashUnlessFuzzing(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
-#endif // V8_ENABLE_WEBASSEMBLY
+ if (IsAsmWasmFunction(isolate, *function)) return CrashUnlessFuzzing(isolate);
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
@@ -569,7 +578,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1 || args.length() == 2);
+ DCHECK_EQ(args.length(), 1);
int status = 0;
if (FLAG_lite_mode || FLAG_jitless) {
// Both jitless and lite modes cannot optimize. Unit tests should handle
@@ -590,32 +599,8 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function_object->IsUndefined()) return Smi::FromInt(status);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-
status |= static_cast<int>(OptimizationStatus::kIsFunction);
- bool sync_with_compiler_thread = true;
- if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
- Handle<String> sync = Handle<String>::cast(sync_object);
- if (sync->IsOneByteEqualTo(base::StaticCharVector("no sync"))) {
- sync_with_compiler_thread = false;
- } else if (sync->IsOneByteEqualTo(base::StaticCharVector("sync")) ||
- sync->length() == 0) {
- DCHECK(sync_with_compiler_thread);
- } else {
- return CrashUnlessFuzzing(isolate);
- }
- }
-
- if (isolate->concurrent_recompilation_enabled() &&
- sync_with_compiler_thread) {
- while (function->IsInOptimizationQueue()) {
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
- }
- }
-
if (function->IsMarkedForOptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
} else if (function->IsMarkedForConcurrentOptimization()) {
@@ -670,39 +655,32 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
return Smi::FromInt(status);
}
-RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
- DCHECK_EQ(0, args.length());
- CHECK(FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->Unblock();
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DisableOptimizationFinalization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->stack_guard()->ClearInstallCode();
+ isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WaitForBackgroundOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_FinalizeOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1005,11 +983,11 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_AbortCSAAssert) {
+RUNTIME_FUNCTION(Runtime_AbortCSADcheck) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
- base::OS::PrintError("abort: CSA_ASSERT failed: %s\n",
+ base::OS::PrintError("abort: CSA_DCHECK failed: %s\n",
message->ToCString().get());
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -1117,6 +1095,11 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
JSObject object = JSObject::cast(arg);
Heap* heap = object.GetHeap();
+ if (!heap->InYoungGeneration(object)) {
+ // Object is not in new space, thus there is no memento and nothing to do.
+ return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
+ }
+
AllocationMemento memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
if (memento.is_null())
@@ -1152,8 +1135,8 @@ RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
bool result;
- if (regexp.TypeTag() == JSRegExp::IRREGEXP) {
- result = regexp.Bytecode(is_latin1).IsByteArray();
+ if (regexp.type_tag() == JSRegExp::IRREGEXP) {
+ result = regexp.bytecode(is_latin1).IsByteArray();
} else {
result = false;
}
@@ -1166,8 +1149,8 @@ RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
bool result;
- if (regexp.TypeTag() == JSRegExp::IRREGEXP) {
- result = regexp.Code(is_latin1).IsCodeT();
+ if (regexp.type_tag() == JSRegExp::IRREGEXP) {
+ result = regexp.code(is_latin1).IsCodeT();
} else {
result = false;
}
@@ -1179,7 +1162,7 @@ RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
const char* type_str;
- switch (regexp.TypeTag()) {
+ switch (regexp.type_tag()) {
case JSRegExp::NOT_COMPILED:
type_str = "NOT_COMPILED";
break;
@@ -1422,10 +1405,8 @@ RUNTIME_FUNCTION(Runtime_NewRegExpWithBacktrackLimit) {
CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 1);
CONVERT_UINT32_ARG_CHECKED(backtrack_limit, 2);
- bool success = false;
JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate, flags_string, &success);
- CHECK(success);
+ JSRegExp::FlagsFromString(isolate, flags_string).value();
RETURN_RESULT_OR_FAILURE(
isolate, JSRegExp::New(isolate, pattern, flags, backtrack_limit));
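
The last hunk above drops FlagsFromString's bool* success out-parameter in favour of an optional-style return whose value() is taken directly. As a rough standalone illustration of that pattern (using std::optional rather than V8's base::Optional, with made-up flag names and parsing rules):

// Standalone sketch, not V8 code: failure is signalled through the return
// type instead of a bool* out-parameter.
#include <cstdio>
#include <optional>
#include <string>

enum Flags : unsigned { kNone = 0, kGlobal = 1 << 0, kIgnoreCase = 1 << 1 };

std::optional<unsigned> FlagsFromString(const std::string& s) {
  unsigned flags = kNone;
  for (char c : s) {
    if (c == 'g') flags |= kGlobal;
    else if (c == 'i') flags |= kIgnoreCase;
    else return std::nullopt;  // Unknown flag letter: report failure.
  }
  return flags;
}

int main() {
  // value() aborts with bad_optional_access on failure, which stands in for
  // the old CHECK(success) at the call site.
  std::printf("flags = %u\n", FlagsFromString("gi").value());
}
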
diff --git a/chromium/v8/src/runtime/runtime-typedarray.cc b/chromium/v8/src/runtime/runtime-typedarray.cc
index 5d0fc35944a..ca3a50ee76e 100644
--- a/chromium/v8/src/runtime/runtime-typedarray.cc
+++ b/chromium/v8/src/runtime/runtime-typedarray.cc
@@ -94,7 +94,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
DCHECK(!array->WasDetached());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
if (FLAG_multi_mapped_mock_allocator) {
// Sorting is meaningless with the mock allocator, and std::sort
// might crash (because aliasing elements violate its assumptions).
diff --git a/chromium/v8/src/runtime/runtime-wasm.cc b/chromium/v8/src/runtime/runtime-wasm.cc
index df4ea141648..9083a0dcdd9 100644
--- a/chromium/v8/src/runtime/runtime-wasm.cc
+++ b/chromium/v8/src/runtime/runtime-wasm.cc
@@ -245,7 +245,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
const wasm::WasmModule* module = instance->module();
const int function_index = function_data->function_index();
- const wasm::WasmFunction function = module->functions[function_index];
+ const wasm::WasmFunction& function = module->functions[function_index];
const wasm::FunctionSig* sig = function.sig;
// The start function is not guaranteed to be registered as
@@ -277,8 +277,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
continue;
}
int index = static_cast<int>(exp.index);
- wasm::WasmFunction function = module->functions[index];
- if (function.sig == sig && index != function_index) {
+ const wasm::WasmFunction& exp_function = module->functions[index];
+ if (exp_function.sig == sig && index != function_index) {
ReplaceWrapper(isolate, instance, index, wrapper_code);
}
}
@@ -572,9 +572,8 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
for (int i = 0; i < weak_instance_list.length(); ++i) {
if (weak_instance_list.Get(i)->IsCleared()) continue;
- i::WasmInstanceObject instance = i::WasmInstanceObject::cast(
- weak_instance_list.Get(i)->GetHeapObject());
- instance.set_break_on_entry(false);
+ i::WasmInstanceObject::cast(weak_instance_list.Get(i)->GetHeapObject())
+ .set_break_on_entry(false);
}
DCHECK(!instance->break_on_entry());
Handle<FixedArray> on_entry_breakpoints;
@@ -641,7 +640,7 @@ inline void* ArrayElementAddress(Handle<WasmArray> array, uint32_t index,
}
} // namespace
-// Assumes copy ranges are in-bounds.
+// Assumes copy ranges are in-bounds and copy length > 0.
RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
@@ -651,6 +650,7 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
CONVERT_ARG_HANDLE_CHECKED(WasmArray, src_array, 2);
CONVERT_UINT32_ARG_CHECKED(src_index, 3);
CONVERT_UINT32_ARG_CHECKED(length, 4);
+ DCHECK_GT(length, 0);
bool overlapping_ranges =
dst_array->ptr() == src_array->ptr() &&
(dst_index < src_index ? dst_index + length > src_index
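
Several hunks above stop copying wasm::WasmFunction values out of module->functions and bind const references instead. A minimal standalone sketch of the same change, with placeholder types:

// Standalone sketch, not V8 code: the reference aliases the stored element,
// so nothing is copied.
#include <cstdio>
#include <string>
#include <vector>

struct Function {
  std::string name;
  int arity;
};

int main() {
  std::vector<Function> functions = {{"add", 2}, {"neg", 1}};
  // `Function f = functions[0];` would copy the std::string; the const
  // reference below does not.
  const Function& f = functions[0];
  std::printf("%s/%d\n", f.name.c_str(), f.arity);
}
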
diff --git a/chromium/v8/src/runtime/runtime.cc b/chromium/v8/src/runtime/runtime.cc
index 47f184a3a09..3bcd41dfcb0 100644
--- a/chromium/v8/src/runtime/runtime.cc
+++ b/chromium/v8/src/runtime/runtime.cc
@@ -203,7 +203,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kArrayBufferDetach:
case Runtime::kDeoptimizeFunction:
case Runtime::kDeoptimizeNow:
+ case Runtime::kDisableOptimizationFinalization:
case Runtime::kEnableCodeLoggingForTesting:
+ case Runtime::kFinalizeOptimization:
case Runtime::kGetUndetectable:
case Runtime::kNeverOptimizeFunction:
case Runtime::kOptimizeFunctionOnNextCall:
@@ -212,6 +214,7 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kPretenureAllocationSite:
case Runtime::kSetAllocationTimeout:
case Runtime::kSimulateNewspaceFull:
+ case Runtime::kWaitForBackgroundOptimization:
return true;
// Runtime functions only permitted for non-differential fuzzers.
// This list may contain functions performing extra checks or returning
@@ -221,9 +224,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kIsBeingInterpreted:
case Runtime::kVerifyType:
return !FLAG_allow_natives_for_differential_fuzzing;
- case Runtime::kCompileBaseline:
case Runtime::kBaselineOsr:
- return FLAG_sparkplug;
+ case Runtime::kCompileBaseline:
+ return ENABLE_SPARKPLUG;
default:
return false;
}
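
The runtime.cc hunks above add entries to the switch-based fuzzing allowlist. For readers unfamiliar with the shape, a stripped-down sketch with invented ids:

// Standalone sketch, not V8 code: a switch acting as an allowlist.
#include <cstdio>

enum class FunctionId { kDeoptimizeNow, kFinalizeOptimization, kAbort };

bool IsAllowListedForFuzzing(FunctionId id) {
  switch (id) {
    case FunctionId::kDeoptimizeNow:
    case FunctionId::kFinalizeOptimization:
      return true;  // Safe to expose to the fuzzer.
    default:
      return false;  // Everything else stays blocked.
  }
}

int main() {
  std::printf("%d %d\n", IsAllowListedForFuzzing(FunctionId::kDeoptimizeNow),
              IsAllowListedForFuzzing(FunctionId::kAbort));
}
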
diff --git a/chromium/v8/src/runtime/runtime.h b/chromium/v8/src/runtime/runtime.h
index 045ffb36418..2e6fc6fa6ed 100644
--- a/chromium/v8/src/runtime/runtime.h
+++ b/chromium/v8/src/runtime/runtime.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-maybe.h"
#include "src/base/bit-field.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
@@ -463,7 +463,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TEST(F, I) \
F(Abort, 1, 1) \
- F(AbortCSAAssert, 1, 1) \
+ F(AbortCSADcheck, 1, 1) \
F(AbortJS, 1, 1) \
F(ArrayIteratorProtector, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
@@ -488,7 +488,7 @@ namespace internal {
F(FinalizeOptimization, 0, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
+ F(GetOptimizationStatus, 1, 1) \
F(GetUndetectable, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -558,7 +558,6 @@ namespace internal {
F(TraceExit, 1, 1) \
F(TurbofanStaticAssert, 1, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
F(WaitForBackgroundOptimization, 0, 1) \
I(DeoptimizeNow, 0, 1)
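
FOR_EACH_INTRINSIC_TEST above is an X-macro: a single list that other macros expand into ids, tables, and stubs, which is why removing UnblockConcurrentRecompilation is a one-line change. A self-contained sketch of the technique with illustrative names:

// Standalone sketch, not V8 code: one list, two expansions.
#include <cstdio>

#define FOR_EACH_TEST_FUNCTION(F) \
  F(Abort, 1)                     \
  F(AbortJS, 1)                   \
  F(GetOptimizationStatus, 1)

enum class TestFunctionId {
#define DEFINE_ID(Name, nargs) k##Name,
  FOR_EACH_TEST_FUNCTION(DEFINE_ID)
#undef DEFINE_ID
};

const char* TestFunctionName(TestFunctionId id) {
  switch (id) {
#define NAME_CASE(Name, nargs) \
  case TestFunctionId::k##Name: return #Name;
    FOR_EACH_TEST_FUNCTION(NAME_CASE)
#undef NAME_CASE
  }
  return "<unknown>";
}

int main() {
  std::printf("%s\n", TestFunctionName(TestFunctionId::kGetOptimizationStatus));
}
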
diff --git a/chromium/v8/src/snapshot/context-deserializer.cc b/chromium/v8/src/snapshot/context-deserializer.cc
index ad109baccaf..fb643ba0142 100644
--- a/chromium/v8/src/snapshot/context-deserializer.cc
+++ b/chromium/v8/src/snapshot/context-deserializer.cc
@@ -61,7 +61,6 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
- buffer->AllocateExternalPointerEntries(isolate());
// TODO(v8:11111): Support RAB / GSAB.
CHECK(!buffer->is_resizable());
SharedFlag shared =
diff --git a/chromium/v8/src/snapshot/context-serializer.cc b/chromium/v8/src/snapshot/context-serializer.cc
index 7a02a50caa6..96d9d5f03e2 100644
--- a/chromium/v8/src/snapshot/context-serializer.cc
+++ b/chromium/v8/src/snapshot/context-serializer.cc
@@ -177,8 +177,8 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfCodeFlushed();
if (closure->is_compiled()) {
- if (closure->shared().HasBaselineData()) {
- closure->shared().flush_baseline_data();
+ if (closure->shared().HasBaselineCode()) {
+ closure->shared().FlushBaselineCode();
}
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
diff --git a/chromium/v8/src/snapshot/deserializer.cc b/chromium/v8/src/snapshot/deserializer.cc
index fab2f80355a..597da110813 100644
--- a/chromium/v8/src/snapshot/deserializer.cc
+++ b/chromium/v8/src/snapshot/deserializer.cc
@@ -482,7 +482,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// a numbered reference to an already deserialized backing store.
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view->AllocateExternalPointerEntries(main_thread_isolate());
data_view->set_data_pointer(
main_thread_isolate(),
reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
@@ -491,7 +490,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// Fixup typed array pointers.
if (typed_array->is_on_heap()) {
Address raw_external_pointer = typed_array->external_pointer_raw();
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOnHeapDataPtr(
main_thread_isolate(), HeapObject::cast(typed_array->base_pointer()),
raw_external_pointer);
@@ -503,7 +501,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
typed_array->byte_offset());
}
@@ -513,8 +510,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->AllocateExternalPointerEntries(main_thread_isolate());
- buffer->set_backing_store(main_thread_isolate(), nullptr);
+ buffer->set_backing_store(nullptr);
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
diff --git a/chromium/v8/src/snapshot/embedded/embedded-data.cc b/chromium/v8/src/snapshot/embedded/embedded-data.cc
index 166e41d3240..188ed6e8797 100644
--- a/chromium/v8/src/snapshot/embedded/embedded-data.cc
+++ b/chromium/v8/src/snapshot/embedded/embedded-data.cc
@@ -218,7 +218,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
@@ -246,7 +246,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// indirection through the root register.
CHECK(on_heap_it.done());
CHECK(off_heap_it.done());
-#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
+#endif
}
}
diff --git a/chromium/v8/src/snapshot/embedded/embedded-empty.cc b/chromium/v8/src/snapshot/embedded/embedded-empty.cc
index c32b459d9d7..e5355215f25 100644
--- a/chromium/v8/src/snapshot/embedded/embedded-empty.cc
+++ b/chromium/v8/src/snapshot/embedded/embedded-empty.cc
@@ -17,15 +17,3 @@ const uint8_t* v8_Default_embedded_blob_code_ = nullptr;
uint32_t v8_Default_embedded_blob_code_size_ = 0;
const uint8_t* v8_Default_embedded_blob_data_ = nullptr;
uint32_t v8_Default_embedded_blob_data_size_ = 0;
-
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_data_size_ = 0;
-#endif
diff --git a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 41cd9dbca09..e858da90b52 100644
--- a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -65,8 +65,14 @@ void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64 use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT((1 << 6) >= kCodeAlignment);
+ fprintf(fp_, ".align 6\n");
+#else
STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
+#endif
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
diff --git a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index e2d5dcb41cf..641d3638f36 100644
--- a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -74,8 +74,14 @@ void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64 use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
@@ -152,8 +158,9 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
const {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // MIPS uses a fixed 4 byte instruction set, using .long
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+ // MIPS and LOONG64 use a fixed 4-byte instruction set, using .long
// to prevent any unnecessary padding.
return kLong;
#else
diff --git a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 5fa12ec6ea2..cfe9bbcde19 100644
--- a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -56,12 +56,18 @@ void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
// prevents something along the compilation chain from messing with the
// embedded blob. Using .global here causes embedded blob hash verification
// failures at runtime.
- STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".private_extern _%s\n", name);
}
void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64 use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
diff --git a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 7b4eadd98a9..83b85c8df9c 100644
--- a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -637,7 +637,14 @@ void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64 use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
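
The four platform file-writer hunks above raise x64 code alignment from 32 to 64 bytes, each re-checking kCodeAlignment with a STATIC_ASSERT. A standalone approximation of the pattern; the kCodeAlignment value and architecture macros here are illustrative, not V8's:

// Standalone sketch, not V8 code: pick the alignment directive per target at
// compile time and verify it still satisfies the required code alignment.
#include <cstdio>

constexpr int kCodeAlignment = 32;  // Illustrative value.

void AlignToCodeAlignment(std::FILE* fp) {
#if defined(__x86_64__) || defined(_M_X64)
  // 64-byte alignment leaves room to align loop headers to cache lines.
  static_assert(64 >= kCodeAlignment, "directive must satisfy kCodeAlignment");
  std::fprintf(fp, ".balign 64\n");
#else
  static_assert(32 >= kCodeAlignment, "directive must satisfy kCodeAlignment");
  std::fprintf(fp, ".balign 32\n");
#endif
}

int main() { AlignToCodeAlignment(stdout); }
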
diff --git a/chromium/v8/src/snapshot/mksnapshot.cc b/chromium/v8/src/snapshot/mksnapshot.cc
index 4e5b43b23ff..86b0304fb03 100644
--- a/chromium/v8/src/snapshot/mksnapshot.cc
+++ b/chromium/v8/src/snapshot/mksnapshot.cc
@@ -9,6 +9,7 @@
#include <iomanip>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/msan.h"
@@ -239,6 +240,11 @@ int main(int argc, char** argv) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
{
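
mksnapshot above now sets up the virtual memory cage before v8::V8::Initialize() and aborts if that fails. A generic fail-fast initialization sketch with stand-in names (ENABLE_CAGE and InitializeCage are not real V8 APIs):

// Standalone sketch, not V8 code: an optional security feature must succeed
// before the main initialization call, otherwise the process aborts.
#include <cstdio>
#include <cstdlib>

bool InitializeCage() { return true; }  // Stand-in for the real setup step.

[[noreturn]] void Fatal(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}

int main() {
#ifdef ENABLE_CAGE  // Illustrative macro standing in for V8_VIRTUAL_MEMORY_CAGE.
  if (!InitializeCage()) Fatal("Could not initialize the virtual memory cage");
#endif
  std::puts("initialized");
}
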
diff --git a/chromium/v8/src/snapshot/serializer.cc b/chromium/v8/src/snapshot/serializer.cc
index 68fb1a01a69..aa595a67a27 100644
--- a/chromium/v8/src/snapshot/serializer.cc
+++ b/chromium/v8/src/snapshot/serializer.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For Space::identity().
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
@@ -48,6 +49,10 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
#endif // OBJECT_PRINT
}
+#ifdef DEBUG
+void Serializer::PopStack() { stack_.Pop(); }
+#endif
+
void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
DCHECK(FLAG_serialization_statistics);
@@ -121,12 +126,14 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
// indirection and serialize the actual string directly.
if (obj->IsThinString(isolate())) {
obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
- } else if (obj->IsBaselineData()) {
- // For now just serialize the BytecodeArray instead of baseline data.
- // TODO(v8:11429,pthier): Handle BaselineData in cases we want to serialize
- // Baseline code.
- obj = handle(Handle<BaselineData>::cast(obj)->GetActiveBytecodeArray(),
- isolate());
+ } else if (obj->IsCodeT()) {
+ Code code = FromCodeT(CodeT::cast(*obj));
+ if (code.kind() == CodeKind::BASELINE) {
+ // For now just serialize the BytecodeArray instead of baseline code.
+ // TODO(v8:11429,pthier): Handle Baseline code in cases we want to
+ // serialize it.
+ obj = handle(code.bytecode_or_interpreter_data(isolate()), isolate());
+ }
}
SerializeObjectImpl(obj);
}
@@ -521,10 +528,6 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
ArrayBufferExtension* extension = buffer->extension();
// The embedder-allocated backing store only exists for the off-heap case.
-#ifdef V8_HEAP_SANDBOX
- uint32_t external_pointer_entry =
- buffer->GetBackingStoreRefForDeserialization();
-#endif
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
buffer->SetBackingStoreRefForSerialization(ref);
@@ -538,11 +541,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
-#ifdef V8_HEAP_SANDBOX
- buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
-#else
- buffer->set_backing_store(isolate(), backing_store);
-#endif
+ buffer->set_backing_store(backing_store);
buffer->set_extension(extension);
}
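
Together with the serializer.h hunk below, the change above moves the DEBUG-only PopStack body out of the header. A compact single-file sketch of that split, assuming a DEBUG macro:

// Standalone sketch, not V8 code: declare the debug-only helper in the class,
// define it out of line so the header stays light.
#include <cstdio>
#include <vector>

class Serializer {
 public:
  void Push(int v) { stack_.push_back(v); }
#ifdef DEBUG
  void PopStack();  // Defined out of line, only in debug builds.
#endif
 private:
  std::vector<int> stack_;
};

#ifdef DEBUG
void Serializer::PopStack() { stack_.pop_back(); }
#endif

int main() {
  Serializer s;
  s.Push(1);
#ifdef DEBUG
  s.PopStack();
#endif
  std::puts("ok");
}
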
diff --git a/chromium/v8/src/snapshot/serializer.h b/chromium/v8/src/snapshot/serializer.h
index 82b1d8ed1e0..3695e217198 100644
--- a/chromium/v8/src/snapshot/serializer.h
+++ b/chromium/v8/src/snapshot/serializer.h
@@ -280,7 +280,7 @@ class Serializer : public SerializerDeserializer {
#ifdef DEBUG
void PushStack(Handle<HeapObject> o) { stack_.Push(*o); }
- void PopStack() { stack_.Pop(); }
+ void PopStack();
void PrintStack();
void PrintStack(std::ostream&);
#endif // DEBUG
diff --git a/chromium/v8/src/snapshot/snapshot.h b/chromium/v8/src/snapshot/snapshot.h
index 2f16eee6d58..f176faa607f 100644
--- a/chromium/v8/src/snapshot/snapshot.h
+++ b/chromium/v8/src/snapshot/snapshot.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
-#include "include/v8.h" // For StartupData.
+#include "include/v8-snapshot.h" // For StartupData.
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
diff --git a/chromium/v8/src/strings/string-builder.cc b/chromium/v8/src/strings/string-builder.cc
index 71534d635fd..7135d556bc3 100644
--- a/chromium/v8/src/strings/string-builder.cc
+++ b/chromium/v8/src/strings/string-builder.cc
@@ -34,7 +34,7 @@ void StringBuilderConcatHelper(String special, sinkchar* sink,
pos = Smi::ToInt(obj);
len = -encoded_slice;
}
- String::WriteToFlat(special, sink + position, pos, pos + len);
+ String::WriteToFlat(special, sink + position, pos, len);
position += len;
} else {
String string = String::cast(element);
diff --git a/chromium/v8/src/strings/string-stream.cc b/chromium/v8/src/strings/string-stream.cc
index 66140a14551..39494a7827f 100644
--- a/chromium/v8/src/strings/string-stream.cc
+++ b/chromium/v8/src/strings/string-stream.cc
@@ -302,7 +302,7 @@ void StringStream::PrintUsingMap(JSObject js_object) {
DescriptorArray descs = map.instance_descriptors(js_object.GetIsolate());
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
Object key = descs.GetKey(i);
if (key.IsString() || key.IsNumber()) {
diff --git a/chromium/v8/src/tasks/OWNERS b/chromium/v8/src/tasks/OWNERS
index f7a22ea908e..69a86ca9844 100644
--- a/chromium/v8/src/tasks/OWNERS
+++ b/chromium/v8/src/tasks/OWNERS
@@ -1,4 +1,3 @@
ahaas@chromium.org
clemensb@chromium.org
mlippautz@chromium.org
-rmcilroy@chromium.org
diff --git a/chromium/v8/src/third_party/vtune/BUILD.gn b/chromium/v8/src/third_party/vtune/BUILD.gn
index e8582dbb793..d763da10646 100644
--- a/chromium/v8/src/third_party/vtune/BUILD.gn
+++ b/chromium/v8/src/third_party/vtune/BUILD.gn
@@ -22,6 +22,11 @@ static_library("v8_vtune") {
"vtune-jit.h",
]
configs += [ ":vtune_ittapi" ]
+
+ # TODO(delphick): Consider changing these to be v8_source_sets
+ if (!build_with_chromium && is_clang) {
+ configs -= [ "//build/config/clang:find_bad_constructs" ]
+ }
deps = [ "../../..:v8" ]
}
diff --git a/chromium/v8/src/third_party/vtune/v8-vtune.h b/chromium/v8/src/third_party/vtune/v8-vtune.h
index 34da9cb5bf8..2ef1bf8cc45 100644
--- a/chromium/v8/src/third_party/vtune/v8-vtune.h
+++ b/chromium/v8/src/third_party/vtune/v8-vtune.h
@@ -58,7 +58,7 @@
#ifndef V8_VTUNE_H_
#define V8_VTUNE_H_
-#include "../../../include/v8.h"
+#include "../../../include/v8-callbacks.h"
namespace vTune {
diff --git a/chromium/v8/src/third_party/vtune/vtune-jit.cc b/chromium/v8/src/third_party/vtune/vtune-jit.cc
index 08fbfbfe397..7b9d338c3e8 100644
--- a/chromium/v8/src/third_party/vtune/vtune-jit.cc
+++ b/chromium/v8/src/third_party/vtune/vtune-jit.cc
@@ -56,6 +56,8 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "vtune-jit.h"
+
#include <stdlib.h>
#include <string.h>
@@ -65,8 +67,12 @@
#include <unordered_map>
#include <vector>
+#include "../../../include/v8-callbacks.h"
+#include "../../../include/v8-initialization.h"
+#include "../../../include/v8-local-handle.h"
+#include "../../../include/v8-primitive.h"
+#include "../../../include/v8-script.h"
#include "v8-vtune.h"
-#include "vtune-jit.h"
namespace vTune {
namespace internal {
diff --git a/chromium/v8/src/third_party/vtune/vtune-jit.h b/chromium/v8/src/third_party/vtune/vtune-jit.h
index 4e5af45c618..148c82434fd 100644
--- a/chromium/v8/src/third_party/vtune/vtune-jit.h
+++ b/chromium/v8/src/third_party/vtune/vtune-jit.h
@@ -58,11 +58,14 @@
#ifndef VTUNE_VTUNE_JIT_H_
#define VTUNE_VTUNE_JIT_H_
-#include "../../../include/v8.h"
#include "third_party/ittapi/include/jitprofiling.h"
#define VTUNERUNNING (iJIT_IsProfilingActive() == iJIT_SAMPLING_ON)
+namespace v8 {
+struct JitCodeEvent;
+}
+
namespace vTune {
namespace internal {
using namespace v8;
diff --git a/chromium/v8/src/torque/ast.h b/chromium/v8/src/torque/ast.h
index 2e1aed3ec9b..d5c99d4890b 100644
--- a/chromium/v8/src/torque/ast.h
+++ b/chromium/v8/src/torque/ast.h
@@ -625,18 +625,18 @@ struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- std::string name,
+ Identifier* name,
std::vector<TypeExpression*> generic_arguments)
: TypeExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
- is_constexpr(IsConstexprName(name)),
- name(std::move(name)),
+ is_constexpr(IsConstexprName(name->value)),
+ name(name),
generic_arguments(std::move(generic_arguments)) {}
- BasicTypeExpression(SourcePosition pos, std::string name)
- : BasicTypeExpression(pos, {}, std::move(name), {}) {}
+ BasicTypeExpression(SourcePosition pos, Identifier* name)
+ : BasicTypeExpression(pos, {}, name, {}) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
- std::string name;
+ Identifier* name;
std::vector<TypeExpression*> generic_arguments;
};
@@ -721,7 +721,7 @@ struct DebugStatement : Statement {
struct AssertStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AssertStatement)
- enum class AssertKind { kAssert, kCheck, kStaticAssert };
+ enum class AssertKind { kDcheck, kCheck, kStaticAssert };
AssertStatement(SourcePosition pos, AssertKind kind, Expression* expression,
std::string source)
: Statement(kKind, pos),
@@ -939,7 +939,6 @@ struct ClassFieldExpression {
std::vector<ConditionalAnnotation> conditions;
bool weak;
bool const_qualified;
- bool generate_verify;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
};
@@ -1306,10 +1305,9 @@ inline VarDeclarationStatement* MakeConstDeclarationStatement(
}
inline BasicTypeExpression* MakeBasicTypeExpression(
- std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<std::string> namespace_qualification, Identifier* name,
std::vector<TypeExpression*> generic_arguments = {}) {
- return MakeNode<BasicTypeExpression>(std::move(namespace_qualification),
- std::move(name),
+ return MakeNode<BasicTypeExpression>(std::move(namespace_qualification), name,
std::move(generic_arguments));
}
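
The ast.h hunk above replaces BasicTypeExpression's std::string name with an Identifier*, so the name carries its source position with it. A toy version of that design, with made-up types:

// Standalone sketch, not V8 Torque code: the position travels with the name.
#include <cstdio>
#include <string>

struct SourcePosition { int line; int column; };

struct Identifier {
  std::string value;
  SourcePosition pos;
};

struct BasicTypeExpression {
  const Identifier* name;  // Pointer instead of a copied string.
};

int main() {
  Identifier id{"Smi", {12, 3}};
  BasicTypeExpression t{&id};
  std::printf("%s declared at %d:%d\n", t.name->value.c_str(),
              t.name->pos.line, t.name->pos.column);
}
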
diff --git a/chromium/v8/src/torque/cc-generator.cc b/chromium/v8/src/torque/cc-generator.cc
index 0dea634ba47..a1f4d496cf9 100644
--- a/chromium/v8/src/torque/cc-generator.cc
+++ b/chromium/v8/src/torque/cc-generator.cc
@@ -105,7 +105,6 @@ std::vector<std::string> CCGenerator::ProcessArgumentsCommon(
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
- VisitResult arg;
if (type->IsConstexpr()) {
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
diff --git a/chromium/v8/src/torque/constants.h b/chromium/v8/src/torque/constants.h
index 57ff3ef4e1e..6490a30d380 100644
--- a/chromium/v8/src/torque/constants.h
+++ b/chromium/v8/src/torque/constants.h
@@ -83,8 +83,6 @@ static const char* const FIXED_ARRAY_BASE_TYPE_STRING = "FixedArrayBase";
static const char* const WEAK_HEAP_OBJECT = "WeakHeapObject";
static const char* const STATIC_ASSERT_MACRO_STRING = "StaticAssert";
-static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
-static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier";
static const char* const ANNOTATION_ABSTRACT = "@abstract";
static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
"@hasSameInstanceTypeAsParent";
@@ -144,21 +142,19 @@ using AbstractTypeFlags = base::Flags<AbstractTypeFlag>;
enum class ClassFlag {
kNone = 0,
kExtern = 1 << 0,
- kGeneratePrint = 1 << 1,
- kGenerateVerify = 1 << 2,
- kTransient = 1 << 3,
- kAbstract = 1 << 4,
- kIsShape = 1 << 5,
- kHasSameInstanceTypeAsParent = 1 << 6,
- kGenerateCppClassDefinitions = 1 << 7,
- kCustomCppClass = 1 << 8,
- kHighestInstanceTypeWithinParent = 1 << 9,
- kLowestInstanceTypeWithinParent = 1 << 10,
- kUndefinedLayout = 1 << 11,
- kGenerateBodyDescriptor = 1 << 12,
- kExport = 1 << 13,
- kDoNotGenerateCast = 1 << 14,
- kCustomMap = 1 << 15,
+ kTransient = 1 << 1,
+ kAbstract = 1 << 2,
+ kIsShape = 1 << 3,
+ kHasSameInstanceTypeAsParent = 1 << 4,
+ kGenerateCppClassDefinitions = 1 << 5,
+ kCustomCppClass = 1 << 6,
+ kHighestInstanceTypeWithinParent = 1 << 7,
+ kLowestInstanceTypeWithinParent = 1 << 8,
+ kUndefinedLayout = 1 << 9,
+ kGenerateBodyDescriptor = 1 << 10,
+ kExport = 1 << 11,
+ kDoNotGenerateCast = 1 << 12,
+ kCustomMap = 1 << 13,
};
using ClassFlags = base::Flags<ClassFlag>;
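
constants.h above renumbers the ClassFlag bits after dropping kGeneratePrint and kGenerateVerify, keeping the shifts contiguous. For reference, a minimal bit-flag enum sketch (names invented) showing how such flags combine and test:

// Standalone sketch, not V8 code: each enumerator occupies its own bit.
#include <cstdio>

enum ClassFlag : unsigned {
  kNone = 0,
  kExtern = 1 << 0,
  kTransient = 1 << 1,
  kAbstract = 1 << 2,
};

int main() {
  unsigned flags = kExtern | kAbstract;
  std::printf("extern=%d abstract=%d transient=%d\n",
              (flags & kExtern) != 0, (flags & kAbstract) != 0,
              (flags & kTransient) != 0);
}
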
diff --git a/chromium/v8/src/torque/cpp-builder.cc b/chromium/v8/src/torque/cpp-builder.cc
index 425fae920ea..f48ca562e9b 100644
--- a/chromium/v8/src/torque/cpp-builder.cc
+++ b/chromium/v8/src/torque/cpp-builder.cc
@@ -14,6 +14,8 @@ void Function::PrintDeclarationHeader(std::ostream& stream,
if (!description_.empty()) {
stream << std::string(indentation, ' ') << "// " << description_ << "\n";
}
+ stream << std::string(indentation, ' ') << "// " << PositionAsString(pos_)
+ << "\n";
stream << std::string(indentation, ' ');
if (IsExport()) stream << "V8_EXPORT_PRIVATE ";
if (IsV8Inline())
@@ -36,6 +38,9 @@ void Function::PrintDeclarationHeader(std::ostream& stream,
}
void Function::PrintDeclaration(std::ostream& stream, int indentation) const {
+ if (indentation == kAutomaticIndentation) {
+ indentation = owning_class_ ? 2 : 0;
+ }
PrintDeclarationHeader(stream, indentation);
stream << ";\n";
}
@@ -63,6 +68,8 @@ void Function::PrintInlineDefinition(
void Function::PrintBeginDefinition(std::ostream& stream,
int indentation) const {
+ stream << std::string(indentation, ' ') << "// " << PositionAsString(pos_)
+ << "\n";
std::string scope;
if (owning_class_) {
scope = owning_class_->GetName();
@@ -116,7 +123,7 @@ void File::BeginIncludeGuard(const std::string& name) {
s() << "#ifndef " << name
<< "\n"
"#define "
- << name << "\n";
+ << name << "\n\n";
}
void File::EndIncludeGuard(const std::string& name) {
diff --git a/chromium/v8/src/torque/cpp-builder.h b/chromium/v8/src/torque/cpp-builder.h
index f741ae6dede..428862c4e2a 100644
--- a/chromium/v8/src/torque/cpp-builder.h
+++ b/chromium/v8/src/torque/cpp-builder.h
@@ -71,9 +71,13 @@ class Function {
};
explicit Function(std::string name)
- : owning_class_(nullptr), name_(std::move(name)) {}
+ : pos_(CurrentSourcePosition::Get()),
+ owning_class_(nullptr),
+ name_(std::move(name)) {}
Function(Class* owning_class, std::string name)
- : owning_class_(owning_class), name_(std::move(name)) {}
+ : pos_(CurrentSourcePosition::Get()),
+ owning_class_(owning_class),
+ name_(std::move(name)) {}
~Function() = default;
static Function DefaultGetter(std::string return_type, Class* owner,
@@ -145,13 +149,15 @@ class Function {
return names;
}
- void PrintDeclaration(std::ostream& stream, int indentation = 0) const;
+ static constexpr int kAutomaticIndentation = -1;
+ void PrintDeclaration(std::ostream& stream,
+ int indentation = kAutomaticIndentation) const;
void PrintDefinition(std::ostream& stream,
const std::function<void(std::ostream&)>& builder,
int indentation = 0) const;
void PrintInlineDefinition(std::ostream& stream,
const std::function<void(std::ostream&)>& builder,
- int indentation = 0) const;
+ int indentation = 2) const;
void PrintBeginDefinition(std::ostream& stream, int indentation = 0) const;
void PrintEndDefinition(std::ostream& stream, int indentation = 0) const;
@@ -159,6 +165,7 @@ class Function {
void PrintDeclarationHeader(std::ostream& stream, int indentation) const;
private:
+ SourcePosition pos_;
Class* owning_class_;
std::string description_;
std::string name_;
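
cpp-builder.h above introduces kAutomaticIndentation = -1 as a default argument so PrintDeclaration can pick the indentation based on whether the function belongs to a class. A small sketch of that sentinel-default idiom with simplified types:

// Standalone sketch, not V8 Torque code: a sentinel default lets the callee
// choose the real value.
#include <cstdio>

class Function {
 public:
  explicit Function(bool has_owner) : has_owner_(has_owner) {}

  static constexpr int kAutomaticIndentation = -1;
  void PrintDeclaration(int indentation = kAutomaticIndentation) const {
    if (indentation == kAutomaticIndentation) {
      // Members of a class get indented two spaces; free functions get none.
      indentation = has_owner_ ? 2 : 0;
    }
    std::printf("%*svoid f();\n", indentation, "");
  }

 private:
  bool has_owner_;
};

int main() {
  Function member(true), free_fn(false);
  member.PrintDeclaration();
  free_fn.PrintDeclaration();
}
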
diff --git a/chromium/v8/src/torque/csa-generator.cc b/chromium/v8/src/torque/csa-generator.cc
index 5443e812fdb..adc86486c74 100644
--- a/chromium/v8/src/torque/csa-generator.cc
+++ b/chromium/v8/src/torque/csa-generator.cc
@@ -157,7 +157,6 @@ std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
- VisitResult arg;
if (type->IsConstexpr()) {
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
diff --git a/chromium/v8/src/torque/declarable.cc b/chromium/v8/src/torque/declarable.cc
index 479a1249b3d..10b0d09daf3 100644
--- a/chromium/v8/src/torque/declarable.cc
+++ b/chromium/v8/src/torque/declarable.cc
@@ -82,13 +82,12 @@ std::ostream& operator<<(std::ostream& os, const GenericCallable& g) {
}
SpecializationRequester::SpecializationRequester(SourcePosition position,
- Scope* scope, std::string name)
+ Scope* s, std::string name)
: position(position), name(std::move(name)) {
// Skip scopes that are not related to template specializations, they might be
// stack-allocated and not live for long enough.
- while (scope && scope->GetSpecializationRequester().IsNone())
- scope = scope->ParentScope();
- this->scope = scope;
+ while (s && s->GetSpecializationRequester().IsNone()) s = s->ParentScope();
+ this->scope = s;
}
std::vector<Declarable*> Scope::Lookup(const QualifiedName& name) {
@@ -165,11 +164,11 @@ TypeArgumentInference GenericCallable::InferSpecializationTypes(
}
base::Optional<Statement*> GenericCallable::CallableBody() {
- if (auto* decl = TorqueMacroDeclaration::DynamicCast(declaration())) {
- return decl->body;
- } else if (auto* decl =
+ if (auto* macro_decl = TorqueMacroDeclaration::DynamicCast(declaration())) {
+ return macro_decl->body;
+ } else if (auto* builtin_decl =
TorqueBuiltinDeclaration::DynamicCast(declaration())) {
- return decl->body;
+ return builtin_decl->body;
} else {
return base::nullopt;
}
diff --git a/chromium/v8/src/torque/declaration-visitor.cc b/chromium/v8/src/torque/declaration-visitor.cc
index 71cde509635..7e46ce59c21 100644
--- a/chromium/v8/src/torque/declaration-visitor.cc
+++ b/chromium/v8/src/torque/declaration-visitor.cc
@@ -5,6 +5,7 @@
#include "src/torque/declaration-visitor.h"
#include "src/torque/ast.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
@@ -109,16 +110,20 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Error("Builtins cannot have return type void.");
}
- return Declarations::CreateBuiltin(std::move(external_name),
- std::move(readable_name), kind,
- std::move(signature), body);
+ Builtin* builtin = Declarations::CreateBuiltin(std::move(external_name),
+ std::move(readable_name), kind,
+ std::move(signature), body);
+ // TODO(v8:12261): Recheck this.
+ // builtin->SetIdentifierPosition(decl->name->pos);
+ return builtin;
}
void DeclarationVisitor::Visit(ExternalBuiltinDeclaration* decl) {
- Declarations::Declare(
- decl->name->value,
+ Builtin* builtin =
CreateBuiltin(decl, decl->name->value, decl->name->value,
- TypeVisitor::MakeSignature(decl), base::nullopt));
+ TypeVisitor::MakeSignature(decl), base::nullopt);
+ builtin->SetIdentifierPosition(decl->name->pos);
+ Declarations::Declare(decl->name->value, builtin);
}
void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
@@ -152,29 +157,43 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
}
}
- Declarations::DeclareRuntimeFunction(decl->name->value, signature);
+ RuntimeFunction* function =
+ Declarations::DeclareRuntimeFunction(decl->name->value, signature);
+ function->SetIdentifierPosition(decl->name->pos);
+ function->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(function);
+ }
}
void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl) {
- Declarations::DeclareMacro(
+ Macro* macro = Declarations::DeclareMacro(
decl->name->value, true, decl->external_assembler_name,
TypeVisitor::MakeSignature(decl), base::nullopt, decl->op);
+ macro->SetIdentifierPosition(decl->name->pos);
+ macro->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(macro);
+ }
}
void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl) {
- Declarations::Declare(
- decl->name->value,
- CreateBuiltin(decl, decl->name->value, decl->name->value,
- TypeVisitor::MakeSignature(decl), decl->body));
+ auto builtin = CreateBuiltin(decl, decl->name->value, decl->name->value,
+ TypeVisitor::MakeSignature(decl), decl->body);
+ builtin->SetIdentifierPosition(decl->name->pos);
+ builtin->SetPosition(decl->pos);
+ Declarations::Declare(decl->name->value, builtin);
}
void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl) {
Macro* macro = Declarations::DeclareMacro(
decl->name->value, decl->export_to_csa, base::nullopt,
TypeVisitor::MakeSignature(decl), decl->body, decl->op);
- // TODO(szuend): Set identifier_position to decl->name->pos once all callable
- // names are changed from std::string to Identifier*.
+ macro->SetIdentifierPosition(decl->name->pos);
macro->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(macro);
+ }
}
void DeclarationVisitor::Visit(IntrinsicDeclaration* decl) {
@@ -183,8 +202,11 @@ void DeclarationVisitor::Visit(IntrinsicDeclaration* decl) {
}
void DeclarationVisitor::Visit(ConstDeclaration* decl) {
- Declarations::DeclareNamespaceConstant(
+ auto constant = Declarations::DeclareNamespaceConstant(
decl->name, TypeVisitor::ComputeType(decl->type), decl->expression);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantDefinition(constant);
+ }
}
void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
@@ -260,7 +282,11 @@ void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
ReportError(stream.str());
}
- Declarations::DeclareExternConstant(decl->name, type, decl->literal);
+ ExternConstant* constant =
+ Declarations::DeclareExternConstant(decl->name, type, decl->literal);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantDefinition(constant);
+ }
}
void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
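
The declaration-visitor.cc hunks above keep the objects returned by the Declarations helpers so positions and Kythe data can be attached afterwards. A standalone sketch of returning the registered object instead of void; all names here are placeholders:

// Standalone sketch, not V8 Torque code: the registry hands back the object it
// stored so the caller can decorate it.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct Constant {
  std::string name;
  int definition_line = -1;
};

class Declarations {
 public:
  Constant* DeclareConstant(std::string name) {
    auto constant = std::make_unique<Constant>();
    constant->name = std::move(name);
    constants_.push_back(std::move(constant));
    return constants_.back().get();
  }

 private:
  std::vector<std::unique_ptr<Constant>> constants_;
};

int main() {
  Declarations decls;
  Constant* c = decls.DeclareConstant("kMaxLength");
  c->definition_line = 42;  // Metadata attached by the caller, as with Kythe.
  std::printf("%s @ line %d\n", c->name.c_str(), c->definition_line);
}
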
diff --git a/chromium/v8/src/torque/declarations.cc b/chromium/v8/src/torque/declarations.cc
index 1e1c89da868..2ff3680bcf8 100644
--- a/chromium/v8/src/torque/declarations.cc
+++ b/chromium/v8/src/torque/declarations.cc
@@ -277,11 +277,12 @@ RuntimeFunction* Declarations::DeclareRuntimeFunction(
new RuntimeFunction(name, signature))));
}
-void Declarations::DeclareExternConstant(Identifier* name, const Type* type,
- std::string value) {
+ExternConstant* Declarations::DeclareExternConstant(Identifier* name,
+ const Type* type,
+ std::string value) {
CheckAlreadyDeclared<Value>(name->value, "constant");
- Declare(name->value, std::unique_ptr<ExternConstant>(
- new ExternConstant(name, type, value)));
+ return Declare(name->value, std::unique_ptr<ExternConstant>(
+ new ExternConstant(name, type, value)));
}
NamespaceConstant* Declarations::DeclareNamespaceConstant(Identifier* name,
diff --git a/chromium/v8/src/torque/declarations.h b/chromium/v8/src/torque/declarations.h
index d417e45ca29..739c021fec3 100644
--- a/chromium/v8/src/torque/declarations.h
+++ b/chromium/v8/src/torque/declarations.h
@@ -132,8 +132,9 @@ class Declarations {
static RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
const Signature& signature);
- static void DeclareExternConstant(Identifier* name, const Type* type,
- std::string value);
+ static ExternConstant* DeclareExternConstant(Identifier* name,
+ const Type* type,
+ std::string value);
static NamespaceConstant* DeclareNamespaceConstant(Identifier* name,
const Type* type,
Expression* body);
diff --git a/chromium/v8/src/torque/earley-parser.cc b/chromium/v8/src/torque/earley-parser.cc
index 9ebb132c826..7326996c70c 100644
--- a/chromium/v8/src/torque/earley-parser.cc
+++ b/chromium/v8/src/torque/earley-parser.cc
@@ -18,11 +18,12 @@ namespace torque {
namespace {
struct LineAndColumnTracker {
- LineAndColumn previous{0, 0};
- LineAndColumn current{0, 0};
+ LineAndColumn previous{0, 0, 0};
+ LineAndColumn current{0, 0, 0};
void Advance(InputPosition from, InputPosition to) {
previous = current;
+ current.offset += std::distance(from, to);
while (from != to) {
if (*from == '\n') {
current.line += 1;
@@ -187,7 +188,8 @@ const Item* RunEarleyAlgorithm(
// Worklist for items at the next position.
std::vector<Item> future_items;
CurrentSourcePosition::Scope source_position(
- SourcePosition{CurrentSourceFile::Get(), {0, 0}, {0, 0}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
std::vector<const Item*> completed_items;
std::unordered_map<std::pair<size_t, Symbol*>, std::set<const Item*>,
base::hash<std::pair<size_t, Symbol*>>>
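
earley-parser.cc above adds an offset field to LineAndColumn and advances it together with line and column. A self-contained sketch of that bookkeeping, with assumed field names:

// Standalone sketch, not V8 Torque code: the offset counts every character,
// including newlines, while line/column reset on '\n'.
#include <cstdio>
#include <string>

struct LineAndColumn {
  int offset = 0;
  int line = 0;
  int column = 0;
};

void Advance(LineAndColumn& pos, const std::string& text) {
  for (char c : text) {
    ++pos.offset;
    if (c == '\n') {
      ++pos.line;
      pos.column = 0;
    } else {
      ++pos.column;
    }
  }
}

int main() {
  LineAndColumn pos;
  Advance(pos, "ab\ncd");
  std::printf("offset=%d line=%d column=%d\n", pos.offset, pos.line, pos.column);
}
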
diff --git a/chromium/v8/src/torque/global-context.cc b/chromium/v8/src/torque/global-context.cc
index a70e8ec41fc..b3372f7542f 100644
--- a/chromium/v8/src/torque/global-context.cc
+++ b/chromium/v8/src/torque/global-context.cc
@@ -13,12 +13,14 @@ DEFINE_CONTEXTUAL_VARIABLE(TargetArchitecture)
GlobalContext::GlobalContext(Ast ast)
: collect_language_server_data_(false),
+ collect_kythe_data_(false),
force_assert_statements_(false),
annotate_ir_(false),
ast_(std::move(ast)) {
CurrentScope::Scope current_scope(nullptr);
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
default_namespace_ =
RegisterDeclarable(std::make_unique<Namespace>(kBaseNamespaceName));
}
diff --git a/chromium/v8/src/torque/global-context.h b/chromium/v8/src/torque/global-context.h
index b5b704d0b28..c0945e575a0 100644
--- a/chromium/v8/src/torque/global-context.h
+++ b/chromium/v8/src/torque/global-context.h
@@ -49,6 +49,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool collect_language_server_data() {
return Get().collect_language_server_data_;
}
+ static void SetCollectKytheData() { Get().collect_kythe_data_ = true; }
+ static bool collect_kythe_data() { return Get().collect_kythe_data_; }
static void SetForceAssertStatements() {
Get().force_assert_statements_ = true;
}
@@ -118,6 +120,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
private:
bool collect_language_server_data_;
+ bool collect_kythe_data_;
bool force_assert_statements_;
bool annotate_ir_;
Namespace* default_namespace_;
diff --git a/chromium/v8/src/torque/implementation-visitor.cc b/chromium/v8/src/torque/implementation-visitor.cc
index b814f0cc636..1a1bec711b5 100644
--- a/chromium/v8/src/torque/implementation-visitor.cc
+++ b/chromium/v8/src/torque/implementation-visitor.cc
@@ -17,6 +17,7 @@
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
@@ -29,6 +30,8 @@ namespace v8 {
namespace internal {
namespace torque {
+uint64_t next_unique_binding_index = 0;
+
// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
@@ -74,7 +77,7 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
void ImplementationVisitor::BeginGeneratedFiles() {
std::set<SourceId> contains_class_definitions;
for (const ClassType* type : TypeOracle::GetClasses()) {
- if (type->GenerateCppClassDefinitions()) {
+ if (type->ShouldGenerateCppClassDefinitions()) {
contains_class_definitions.insert(type->AttributedToFile());
}
}
@@ -103,6 +106,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
file << "\n";
streams.csa_cc.BeginNamespace("v8", "internal");
+ streams.csa_ccfile << "\n";
}
// Output beginning of CSA .h file.
{
@@ -115,6 +119,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
file << "\n";
streams.csa_header.BeginNamespace("v8", "internal");
+ streams.csa_headerfile << "\n";
}
// Output beginning of class definition .cc file.
{
@@ -128,6 +133,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
}
streams.class_definition_cc.BeginNamespace("v8", "internal");
+ streams.class_definition_ccfile << "\n";
}
}
}
@@ -146,6 +152,7 @@ void ImplementationVisitor::EndGeneratedFiles() {
UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_CSA_H_";
streams.csa_header.EndNamespace("v8", "internal");
+ streams.csa_headerfile << "\n";
streams.csa_header.EndIncludeGuard(header_define);
}
@@ -291,24 +298,33 @@ VisitResult ImplementationVisitor::InlineMacro(
DCHECK(macro->IsMethod());
parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
true);
+ // TODO(v8:12261): Tracking 'this'-binding for kythe led to a few weird
+ // issues. Review to fully support 'this' in methods.
}
- size_t i = 0;
+ size_t count = 0;
for (auto arg : arguments) {
- if (this_reference && i == signature.implicit_count) i++;
- const bool mark_as_used = signature.implicit_count > i;
- const Identifier* name = macro->parameter_names()[i++];
- parameter_bindings.Add(name,
- LocalValue{LocationReference::Temporary(
- arg, "parameter " + name->value)},
- mark_as_used);
+ if (this_reference && count == signature.implicit_count) count++;
+ const bool mark_as_used = signature.implicit_count > count;
+ const Identifier* name = macro->parameter_names()[count++];
+ Binding<LocalValue>* binding =
+ parameter_bindings.Add(name,
+ LocalValue{LocationReference::Temporary(
+ arg, "parameter " + name->value)},
+ mark_as_used);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
}
DCHECK_EQ(label_blocks.size(), signature.labels.size());
for (size_t i = 0; i < signature.labels.size(); ++i) {
const LabelDeclaration& label_info = signature.labels[i];
- label_bindings.Add(label_info.name,
- LocalLabel{label_blocks[i], label_info.types});
+ Binding<LocalLabel>* binding = label_bindings.Add(
+ label_info.name, LocalLabel{label_blocks[i], label_info.types});
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
}
Block* macro_end;
@@ -382,6 +398,7 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
cpp::Function f = GenerateMacroFunctionDeclaration(macro);
f.PrintDeclaration(csa_headerfile());
+ csa_headerfile() << "\n";
cpp::File csa_cc(csa_ccfile());
@@ -475,9 +492,9 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
const LabelDeclaration& label_info = signature.labels[i];
assembler().Bind(label_block);
std::vector<std::string> label_parameter_variables;
- for (size_t i = 0; i < label_info.types.size(); ++i) {
- LowerLabelParameter(label_info.types[i],
- ExternalLabelParameterName(label_info.name->value, i),
+ for (size_t j = 0; j < label_info.types.size(); ++j) {
+ LowerLabelParameter(label_info.types[j],
+ ExternalLabelParameterName(label_info.name->value, j),
&label_parameter_variables);
}
assembler().Emit(GotoExternalInstruction{
@@ -518,7 +535,6 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
f.PrintEndDefinition(csa_ccfile());
include_guard.reset();
- csa_ccfile() << "\n";
}
void ImplementationVisitor::Visit(TorqueMacro* macro) {
@@ -542,11 +558,14 @@ std::string AddParameter(size_t i, Builtin* builtin,
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
StackRange range = parameter_types->PushMany(LowerType(type));
- parameter_bindings->Add(
+ Binding<LocalValue>* binding = parameter_bindings->Add(
name,
LocalValue{LocationReference::Temporary(VisitResult(type, range),
"parameter " + name->value)},
mark_as_used);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
return external_name;
}
@@ -579,7 +598,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
- csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
+ csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
<< "Descriptor::kJSActualArgumentsCount);\n";
csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
@@ -588,13 +607,17 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length));\n";
+ "arguments_length, (kJSArgcIncludesReceiver ? "
+ "FrameArgumentsArgcType::kCountIncludesReceiver : "
+ "FrameArgumentsArgcType::kCountExcludesReceiver)"
+ << "));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
parameters.Push("torque_arguments.base");
parameters.Push("torque_arguments.length");
+ parameters.Push("torque_arguments.actual_count");
const Type* arguments_type = TypeOracle::GetArgumentsType();
StackRange range = parameter_types.PushMany(LowerType(arguments_type));
parameter_bindings.Add(*signature.arguments_variable,
@@ -625,7 +648,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
? "arguments.GetReceiver()"
: "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
- csa_ccfile() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
csa_ccfile() << " TNode<Object> " << generated_name
@@ -737,12 +760,12 @@ const Type* ImplementationVisitor::Visit(
ReportError("constexpr variables need an initializer");
}
TypeVector lowered_types = LowerType(*type);
- for (const Type* type : lowered_types) {
+ for (const Type* t : lowered_types) {
assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
"uninitialized variable '" + stmt->name->value + "' of type " +
- type->ToString() + " originally defined at " +
+ t->ToString() + " originally defined at " +
PositionAsString(stmt->pos),
- type)});
+ t)});
}
init_result =
VisitResult(*type, assembler().TopRange(lowered_types.size()));
@@ -1001,6 +1024,9 @@ const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
LanguageServerData::AddDefinition(stmt->label->pos,
label->declaration_position());
}
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingUse(stmt->label->pos, label);
+ }
size_t i = 0;
StackRange arguments = assembler().TopRange(0);
@@ -1192,7 +1218,7 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
{}});
return TypeOracle::GetVoidType();
}
- bool do_check = stmt->kind != AssertStatement::AssertKind::kAssert ||
+ bool do_check = stmt->kind != AssertStatement::AssertKind::kDcheck ||
GlobalContext::force_assert_statements();
#if defined(DEBUG)
do_check = true;
@@ -1206,15 +1232,15 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
assembler().Bind(unreachable_block);
}
- // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
+ // CSA_DCHECK & co. are not used here on purpose for two reasons. First,
// Torque allows and handles two types of expressions in the if protocol
// automagically, ones that return TNode<BoolT> and those that use the
// BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
// handle this is embedded in the expression handling and to it's not
- // possible to make the decision to use CSA_ASSERT or CSA_ASSERT_BRANCH
+ // possible to make the decision to use CSA_DCHECK or CSA_DCHECK_BRANCH
// isn't trivial up-front. Secondly, on failure, the assert text should be
// the corresponding Torque code, not the -gen.cc code, which would be the
- // case when using CSA_ASSERT_XXX.
+ // case when using CSA_DCHECK_XXX.
Block* true_block = assembler().NewBlock(assembler().CurrentStack());
Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
GenerateExpressionBranch(stmt->expression, true_block, false_block);
@@ -1907,9 +1933,9 @@ void FailCallableLookup(
stream << "\nfailed to instantiate all of these generic declarations:";
for (auto& failure : inapplicable_generics) {
GenericCallable* generic = failure.first;
- const std::string& reason = failure.second;
+ const std::string& fail_reason = failure.second;
stream << "\n " << generic->name() << " defined at "
- << generic->Position() << ":\n " << reason << "\n";
+ << generic->Position() << ":\n " << fail_reason << "\n";
}
}
ReportError(stream.str());
@@ -2226,6 +2252,9 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
+ if (GlobalContext::collect_kythe_data() && pos.has_value()) {
+ KytheData::AddClassFieldUse(*pos, &field);
+ }
if (field.const_qualified) {
VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
return LocationReference::Temporary(
@@ -2322,6 +2351,9 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddClassFieldUse(*pos, &field);
+ }
return GenerateFieldReference(object_result, field, *class_type);
}
}
@@ -2363,6 +2395,13 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos,
(*value)->declaration_position());
}
+ if (GlobalContext::collect_kythe_data()) {
+ if (!expr->IsThis()) {
+ DCHECK_EQ(expr->name->pos.end.column - expr->name->pos.start.column,
+ expr->name->value.length());
+ KytheData::AddBindingUse(expr->name->pos, *value);
+ }
+ }
if (expr->generic_arguments.size() != 0) {
ReportError("cannot have generic parameters on local name ",
expr->name);
@@ -2381,6 +2420,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos,
(*builtin)->Position());
}
+ // TODO(v8:12261): Consider collecting KytheData here.
return LocationReference::Temporary(GetBuiltinCode(*builtin),
"builtin " + expr->name->value);
}
@@ -2407,6 +2447,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
}
if (auto* constant = NamespaceConstant::DynamicCast(value)) {
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantUse(expr->name->pos, constant);
+ }
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
VisitResult(constant->type(), constant->external_name() + "(state_)"),
@@ -2420,6 +2463,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
"namespace constant " + expr->name->value);
}
ExternConstant* constant = ExternConstant::cast(value);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantUse(expr->name->pos, constant);
+ }
return LocationReference::Temporary(constant->value(),
"extern value " + expr->name->value);
}
@@ -3016,10 +3062,10 @@ VisitResult ImplementationVisitor::GenerateCall(
arguments_to_getter.parameters.begin(),
converted_arguments.begin() + 1, converted_arguments.end());
- Callable* callable = LookupCallable(
+ Callable* callable_macro = LookupCallable(
qualified_getter_name, Declarations::Lookup(qualified_getter_name),
arguments_to_getter, {});
- Macro* getter = Macro::DynamicCast(callable);
+ Macro* getter = Macro::DynamicCast(callable_macro);
if (!getter || getter->IsMethod()) {
ReportError(
"%MakeLazy expects a macro, not builtin or other type of callable");
@@ -3046,10 +3092,10 @@ VisitResult ImplementationVisitor::GenerateCall(
StackRange argument_range_for_getter = assembler().TopRange(0);
std::vector<std::string> constexpr_arguments_for_getter;
- size_t current = 0;
+ size_t arg_count = 0;
for (auto arg : arguments_to_getter.parameters) {
- DCHECK_LT(current, getter->signature().types().size());
- const Type* to_type = getter->signature().types()[current++];
+ DCHECK_LT(arg_count, getter->signature().types().size());
+ const Type* to_type = getter->signature().types()[arg_count++];
AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
&argument_range_for_getter,
&constexpr_arguments_for_getter,
@@ -3065,10 +3111,20 @@ VisitResult ImplementationVisitor::GenerateCall(
const Type* type = specialization_types[0];
const ClassType* class_type = ClassType::DynamicCast(type);
if (!class_type) {
- ReportError("%FieldSlice must take a class type parameter");
+ ReportError("The first type parameter to %FieldSlice must be a class");
}
const Field& field =
class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
+ const Type* expected_slice_type =
+ field.const_qualified
+ ? TypeOracle::GetConstSliceType(field.name_and_type.type)
+ : TypeOracle::GetMutableSliceType(field.name_and_type.type);
+ const Type* declared_slice_type = specialization_types[1];
+ if (expected_slice_type != declared_slice_type) {
+ Error(
+ "The second type parameter to %FieldSlice must be the precise "
+ "slice type for the named field");
+ }
LocationReference ref = GenerateFieldReference(
VisitResult(type, argument_range), field, class_type,
/*treat_optional_as_indexed=*/true);
@@ -3131,6 +3187,12 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
LanguageServerData::AddDefinition(expr->callee->name->pos,
callable->IdentifierPosition());
}
+ if (GlobalContext::collect_kythe_data()) {
+ Callable* callable = LookupCallable(name, Declarations::Lookup(name),
+ arguments, specialization_types);
+ Callable* caller = CurrentCallable::Get();
+ KytheData::AddCall(caller, expr->callee->name->pos, callable);
+ }
if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
PropagateBitfieldMark(expr->arguments[0], expr);
}
@@ -3174,6 +3236,10 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
LanguageServerData::AddDefinition(expr->method->name->pos,
callable->IdentifierPosition());
}
+ if (GlobalContext::collect_kythe_data()) {
+ Callable* caller = CurrentCallable::Get();
+ KytheData::AddCall(caller, expr->method->name->pos, callable);
+ }
return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}
@@ -3264,6 +3330,7 @@ std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
LanguageServerData::AddDefinition(name->pos,
label->declaration_position());
}
+ // TODO(v8:12261): Might have to track KytheData here.
}
return result;
}
@@ -3521,7 +3588,8 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
// count.
int parameter_count =
static_cast<int>(builtin->signature().ExplicitCount());
- builtin_definitions << ", " << parameter_count;
+ builtin_definitions << ", JSParameterCount(" << parameter_count
+ << ")";
// And the receiver is explicitly declared.
builtin_definitions << ", kReceiver";
for (size_t i = builtin->signature().implicit_count;
@@ -3543,7 +3611,8 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
Declarations::FindSomeInternalBuiltinWithType(type);
if (!example_builtin) {
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
ReportError("unable to find any builtin with type \"", *type, "\"");
}
builtin_definitions << " V(" << type->function_pointer_type_id() << ","
@@ -3712,29 +3781,6 @@ class FieldOffsetsGenerator {
bool header_size_emitted_ = false;
};
-class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
- public:
- MacroFieldOffsetsGenerator(std::ostream& out, const ClassType* type)
- : FieldOffsetsGenerator(type), out_(out) {
- out_ << "#define ";
- out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
- << "_FIELDS(V) \\\n";
- }
- void WriteField(const Field& f, const std::string& size_string) override {
- out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
- << size_string << ") \\\n";
- }
- void WriteFieldOffsetGetter(const Field& f) override {
- // Can't do anything here.
- }
- void WriteMarker(const std::string& marker) override {
- out_ << "V(" << marker << ", 0) \\\n";
- }
-
- private:
- std::ostream& out_;
-};
-
void GenerateClassExport(const ClassType* type, std::ostream& header,
std::ostream& inl_header) {
const ClassType* super = type->GetSuperClass();
@@ -3752,27 +3798,13 @@ void GenerateClassExport(const ClassType* type, std::ostream& header,
} // namespace
-void ImplementationVisitor::GenerateClassFieldOffsets(
+void ImplementationVisitor::GenerateVisitorLists(
const std::string& output_directory) {
std::stringstream header;
- std::string file_name = "field-offsets.h";
+ std::string file_name = "visitor-lists.h";
{
IncludeGuardScope include_guard(header, file_name);
- for (const ClassType* type : TypeOracle::GetClasses()) {
- // TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
- // to generate field offsets without the use of macros.
- if (!type->GenerateCppClassDefinitions() && !type->HasUndefinedLayout()) {
- MacroFieldOffsetsGenerator g(header, type);
- for (auto f : type->fields()) {
- CurrentSourcePosition::Scope scope(f.pos);
- g.RecordOffsetFor(f);
- }
- g.Finish();
- header << "\n";
- }
- }
-
header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
@@ -3855,13 +3887,17 @@ namespace {
class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
public:
ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
- const ClassType* type, std::string gen_name)
+ const ClassType* type, std::string gen_name,
+ const ClassType* parent)
: FieldOffsetsGenerator(type),
hdr_(header),
inl_(inline_header),
- previous_field_end_("P::kHeaderSize"),
+ previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
+ : "P::kHeaderSize"),
gen_name_(gen_name) {}
+
void WriteField(const Field& f, const std::string& size_string) override {
+ hdr_ << " // " << PositionAsString(f.pos) << "\n";
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
std::string field_end = field + "End";
hdr_ << " static constexpr int " << field << " = " << previous_field_end_
@@ -3870,6 +3906,7 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
<< size_string << " - 1;\n";
previous_field_end_ = field_end + " + 1";
}
+
void WriteFieldOffsetGetter(const Field& f) override {
// A static constexpr int is more convenient than a getter if the offset is
// known.
@@ -3922,6 +3959,8 @@ class CppClassGenerator {
void GenerateClass();
private:
+ SourcePosition Position();
+
void GenerateClassConstructors();
// Generates getter and setter runtime member functions for the given class
@@ -3981,7 +4020,7 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
void CppClassGenerator::GenerateClass() {
// Is<name>_NonInline(HeapObject)
- {
+ if (!type_->IsShape()) {
cpp::Function f("Is"s + name_ + "_NonInline");
f.SetDescription("Alias for HeapObject::Is"s + name_ +
"() that avoids inlining.");
@@ -3990,18 +4029,21 @@ void CppClassGenerator::GenerateClass() {
f.AddParameter("HeapObject", "o");
f.PrintDeclaration(hdr_);
+ hdr_ << "\n";
f.PrintDefinition(impl_, [&](std::ostream& stream) {
- stream << " return o.Is" << name_ << "();";
+ stream << " return o.Is" << name_ << "();\n";
});
}
-
+ hdr_ << "// Definition " << PositionAsString(Position()) << "\n";
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
- hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
- << " \"Use this class as direct base for " << name_ << ".\");\n";
- hdr_ << " static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
- << " \"Pass in " << super_->name()
- << " as second template parameter for " << gen_name_ << ".\");\n";
+ hdr_ << " static_assert(\n"
+ << " std::is_same<" << name_ << ", D>::value,\n"
+ << " \"Use this class as direct base for " << name_ << ".\");\n";
+ hdr_ << " static_assert(\n"
+ << " std::is_same<" << super_->name() << ", P>::value,\n"
+ << " \"Pass in " << super_->name()
+ << " as second template parameter for " << gen_name_ << ".\");\n\n";
hdr_ << " public: \n";
hdr_ << " using Super = P;\n";
hdr_ << " using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
@@ -4009,6 +4051,7 @@ void CppClassGenerator::GenerateClass() {
hdr_ << " protected: // not extern or @export\n";
}
for (const Field& f : type_->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
std::vector<const Field*> struct_fields;
GenerateFieldAccessors(f, struct_fields);
}
@@ -4023,7 +4066,7 @@ void CppClassGenerator::GenerateClass() {
cpp::Class c(std::move(templateArgs), gen_name_);
if (type_->ShouldGeneratePrint()) {
- hdr_ << "\n DECL_PRINTER(" << name_ << ")\n";
+ hdr_ << " DECL_PRINTER(" << name_ << ")\n\n";
}
if (type_->ShouldGenerateVerify()) {
@@ -4042,11 +4085,15 @@ void CppClassGenerator::GenerateClass() {
impl_ << " TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
<< "::cast(*this), "
"isolate);\n";
- impl_ << "}\n";
+ impl_ << "}\n\n";
+ }
+ if (type_->ShouldGenerateVerify()) {
+ impl_ << "\n";
}
hdr_ << "\n";
- ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_);
+ ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
+ type_->GetSuperClass());
for (auto f : type_->fields()) {
CurrentSourcePosition::Scope scope(f.pos);
g.RecordOffsetFor(f);
@@ -4068,7 +4115,7 @@ void CppClassGenerator::GenerateClass() {
{
cpp::Function f =
cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
- f.PrintDeclaration(hdr_, 2);
+ f.PrintDeclaration(hdr_);
f.PrintDefinition(inl_, [&](std::ostream& stream) {
stream << " auto slice = "
@@ -4112,10 +4159,10 @@ void CppClassGenerator::GenerateClass() {
// V8_INLINE int32_t AllocatedSize() const
{
- cpp::Function f =
+ cpp::Function allocated_size_f =
cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
- f.SetFlag(cpp::Function::kV8Inline);
- f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
+ allocated_size_f.SetFlag(cpp::Function::kV8Inline);
+ allocated_size_f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
stream << " return SizeFor(";
bool first = true;
for (auto field : *index_fields) {
@@ -4143,7 +4190,7 @@ void CppClassGenerator::GenerateClass() {
// hand-written definition.
base::Optional<const ClassType*> parent = type_->parent()->ClassSupertype();
while (parent) {
- if ((*parent)->GenerateCppClassDefinitions() &&
+ if ((*parent)->ShouldGenerateCppClassDefinitions() &&
!(*parent)->ShouldGenerateFullClassDefinition() &&
(*parent)->AttributedToFile() == type_->AttributedToFile()) {
Error("Exported ", *type_,
@@ -4164,24 +4211,34 @@ void CppClassGenerator::GenerateClassCasts() {
// V8_INLINE static D cast(Object)
f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
- stream << " return D(object.ptr());\n";
+ stream << " return D(object.ptr());\n";
});
// V8_INLINE static D unchecked_cast(Object)
f.SetName("unchecked_cast");
f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
- stream << " return bit_cast<D>(object);\n";
+ stream << " return bit_cast<D>(object);\n";
});
}
+SourcePosition CppClassGenerator::Position() { return type_->GetPosition(); }
+
void CppClassGenerator::GenerateClassConstructors() {
- hdr_ << " public:\n";
+ const ClassType* typecheck_type = type_;
+ while (typecheck_type->IsShape()) {
+ typecheck_type = typecheck_type->GetSuperClass();
+
+ // Shapes have already been checked earlier to inherit from JSObject, so we
+ // should have found an appropriate type.
+ DCHECK(typecheck_type);
+ }
+
hdr_ << " template <class DAlias = D>\n";
hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
- hdr_ << " static_assert(std::is_base_of<" << gen_name_ << ", \n";
- hdr_ << " DAlias>::value,\n";
- hdr_ << " \"class " << gen_name_ << " should be used as direct base for "
- << name_ << ".\");\n";
- hdr_ << " }\n";
+ hdr_ << " static_assert(\n";
+ hdr_ << " std::is_base_of<" << gen_name_ << ", DAlias>::value,\n";
+ hdr_ << " \"class " << gen_name_
+ << " should be used as direct base for " << name_ << ".\");\n";
+ hdr_ << " }\n\n";
hdr_ << " protected:\n";
hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
@@ -4193,18 +4250,19 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
- inl_ << " : P(ptr) {\n";
- inl_ << " SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
+ inl_ << " : P(ptr) {\n";
+ inl_ << " SLOW_DCHECK(Is" << typecheck_type->name()
+ << "_NonInline(*this));\n";
inl_ << "}\n";
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_
<< "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
- inl_ << " : P(ptr, allow_smi) {\n";
+ inl_ << " : P(ptr, allow_smi) {\n";
inl_ << " SLOW_DCHECK("
<< "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
" && this->IsSmi()) || Is"
- << name_ << "_NonInline(*this));\n";
+ << typecheck_type->name() << "_NonInline(*this));\n";
inl_ << "}\n";
}
@@ -4258,6 +4316,14 @@ void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
}
os << " DCHECK_LT(" << index << ", " << length_expression << ");\n";
}
+
+bool CanGenerateFieldAccessors(const Type* field_type) {
+ // float64_or_hole should be treated like float64. For now, we don't need it.
+ // TODO(v8:10391) Generate accessors for external pointers.
+ return field_type != TypeOracle::GetVoidType() &&
+ field_type != TypeOracle::GetFloat64OrHoleType() &&
+ !field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType());
+}
} // namespace
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
@@ -4266,12 +4332,7 @@ void CppClassGenerator::GenerateFieldAccessors(
const Field& innermost_field =
struct_fields.empty() ? class_field : *struct_fields.back();
const Type* field_type = innermost_field.name_and_type.type;
- if (field_type == TypeOracle::GetVoidType()) return;
-
- // float64_or_hole should be treated like float64. For now, we don't need it.
- if (field_type == TypeOracle::GetFloat64OrHoleType()) {
- return;
- }
+ if (!CanGenerateFieldAccessors(field_type)) return;
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
struct_fields.resize(struct_fields.size() + 1);
@@ -4283,11 +4344,6 @@ void CppClassGenerator::GenerateFieldAccessors(
return;
}
- // TODO(v8:10391) Generate accessors for external pointers
- if (field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
- return;
- }
-
bool indexed = class_field.index && !class_field.index->optional;
std::string type_name = GetTypeNameForAccessor(innermost_field);
bool can_contain_heap_objects = CanContainHeapObjects(field_type);
@@ -4601,20 +4657,24 @@ void ImplementationVisitor::GenerateClassDefinitions(
// Emit forward declarations.
for (const ClassType* type : TypeOracle::GetClasses()) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
- << ";\n";
+ std::string name = type->ShouldGenerateCppClassDefinitions()
+ ? type->name()
+ : type->GetGeneratedTNodeTypeName();
+ header << "class " << name << ";\n";
+ forward_declarations << "class " << name << ";\n";
}
for (const ClassType* type : TypeOracle::GetClasses()) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
std::ostream& inline_header = streams.class_definition_inline_headerfile;
std::ostream& implementation = streams.class_definition_ccfile;
- if (type->GenerateCppClassDefinitions()) {
+ if (type->ShouldGenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
g.GenerateClass();
}
@@ -4710,6 +4770,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
for (const StructType* type : structs_used_in_classes) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
std::ostream& header =
GlobalContext::GeneratedPerFile(type->GetPosition().source)
.class_definition_headerfile;
@@ -4739,43 +4800,50 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
std::map<std::string, const AggregateType*> field_names;
for (const AggregateType* aggregate_type : hierarchy) {
for (const Field& f : aggregate_type->fields()) {
- if (f.name_and_type.name == "map") continue;
- if (!f.index.has_value()) {
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
- !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
- if (f.name_and_type.type->StructSupertype()) {
- // TODO(turbofan): Print struct fields too.
- impl << "\" <struct field printing still unimplemented>\";\n";
- } else {
- impl << "this->" << f.name_and_type.name;
- switch (f.read_synchronization) {
- case FieldSynchronization::kNone:
- impl << "();\n";
- break;
- case FieldSynchronization::kRelaxed:
- impl << "(kRelaxedLoad);\n";
- break;
- case FieldSynchronization::kAcquireRelease:
- impl << "(kAcquireLoad);\n";
- break;
- }
- }
+ if (f.name_and_type.name == "map" || f.index.has_value() ||
+ !CanGenerateFieldAccessors(f.name_and_type.type)) {
+ continue;
+ }
+ std::string getter = f.name_and_type.name;
+ if (aggregate_type != type) {
+ // We must call getters directly on the class that provided them,
+ // because a subclass could have hidden them.
+ getter = aggregate_type->name() + "::TorqueGeneratedClass::" + getter;
+ }
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
+ !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
+ if (f.name_and_type.type->StructSupertype()) {
+ // TODO(turbofan): Print struct fields too.
+ impl << "\" <struct field printing still unimplemented>\";\n";
} else {
- impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
- << "Brief(this->" << f.name_and_type.name;
+ impl << "this->" << getter;
switch (f.read_synchronization) {
case FieldSynchronization::kNone:
- impl << "());\n";
+ impl << "();\n";
break;
case FieldSynchronization::kRelaxed:
- impl << "(kRelaxedLoad));\n";
+ impl << "(kRelaxedLoad);\n";
break;
case FieldSynchronization::kAcquireRelease:
- impl << "(kAcquireLoad));\n";
+ impl << "(kAcquireLoad);\n";
break;
}
}
+ } else {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
+ << "Brief(this->" << getter;
+ switch (f.read_synchronization) {
+ case FieldSynchronization::kNone:
+ impl << "());\n";
+ break;
+ case FieldSynchronization::kRelaxed:
+ impl << "(kRelaxedLoad));\n";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ impl << "(kAcquireLoad));\n";
+ break;
+ }
}
}
}
@@ -4798,19 +4866,14 @@ void ImplementationVisitor::GeneratePrintDefinitions(
for (const ClassType* type : TypeOracle::GetClasses()) {
if (!type->ShouldGeneratePrint()) continue;
-
- if (type->GenerateCppClassDefinitions()) {
- const ClassType* super = type->GetSuperClass();
- std::string gen_name = "TorqueGenerated" + type->name();
- std::string gen_name_T =
- gen_name + "<" + type->name() + ", " + super->name() + ">";
- std::string template_decl = "template <>";
- GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
- template_decl);
- } else {
- GeneratePrintDefinitionsForClass(impl, type, type->name(), type->name(),
- "");
- }
+ DCHECK(type->ShouldGenerateCppClassDefinitions());
+ const ClassType* super = type->GetSuperClass();
+ std::string gen_name = "TorqueGenerated" + type->name();
+ std::string gen_name_T =
+ gen_name + "<" + type->name() + ", " + super->name() + ">";
+ std::string template_decl = "template <>";
+ GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
+ template_decl);
}
}
@@ -5037,7 +5100,6 @@ void GenerateClassFieldVerifier(const std::string& class_name,
const ClassType& class_type, const Field& f,
std::ostream& h_contents,
std::ostream& cc_contents) {
- if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers. Structs
@@ -5154,16 +5216,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
}
if (super_type) {
std::string super_name = super_type->name();
- if (super_name == "HeapObject") {
- // Special case: HeapObjectVerify checks the Map type and dispatches
- // to more specific types, so calling it here would cause infinite
- // recursion. We could consider moving that behavior into a
- // different method to make the contract of *Verify methods more
- // consistent, but for now we'll just avoid the bad case.
- cc_contents << " " << super_name << "Verify(o, isolate);\n";
- } else {
- cc_contents << " o." << super_name << "Verify(isolate);\n";
- }
+ cc_contents << " o." << super_name << "Verify(isolate);\n";
}
// Second, verify that this object is what it claims to be.
@@ -5274,6 +5327,7 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
for (auto& declarable : GlobalContext::AllDeclarables()) {
TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
if (!(macro && macro->IsExportedToCSA())) continue;
+ CurrentSourcePosition::Scope position_activator(macro->Position());
cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
std::vector<std::string> generated_parameter_names;
@@ -5340,12 +5394,12 @@ void ImplementationVisitor::GenerateCSATypes(
}
h_contents << "\n std::tuple<";
bool first = true;
- for (const Type* type : LowerType(struct_type)) {
+ for (const Type* lowered_type : LowerType(struct_type)) {
if (!first) {
h_contents << ", ";
}
first = false;
- h_contents << type->GetGeneratedTypeName();
+ h_contents << lowered_type->GetGeneratedTypeName();
}
std::vector<std::string> all_fields;
for (auto& field : struct_type->fields()) {
diff --git a/chromium/v8/src/torque/implementation-visitor.h b/chromium/v8/src/torque/implementation-visitor.h
index 0c9ac445c5f..8ebb72cc2eb 100644
--- a/chromium/v8/src/torque/implementation-visitor.h
+++ b/chromium/v8/src/torque/implementation-visitor.h
@@ -228,6 +228,8 @@ struct LayoutForInitialization {
VisitResult size;
};
+extern uint64_t next_unique_binding_index;
+
template <class T>
class Binding;
@@ -262,7 +264,8 @@ class Binding : public T {
name_(name),
previous_binding_(this),
used_(false),
- written_(false) {
+ written_(false),
+ unique_index_(next_unique_binding_index++) {
std::swap(previous_binding_, manager_->current_bindings_[name]);
}
template <class... Args>
@@ -300,6 +303,8 @@ class Binding : public T {
bool Written() const { return written_; }
void SetWritten() { written_ = true; }
+ uint64_t unique_index() const { return unique_index_; }
+
private:
bool SkipLintCheck() const { return name_.length() > 0 && name_[0] == '_'; }
@@ -309,26 +314,31 @@ class Binding : public T {
SourcePosition declaration_position_ = CurrentSourcePosition::Get();
bool used_;
bool written_;
+ uint64_t unique_index_;
};
template <class T>
class BlockBindings {
public:
explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
- void Add(std::string name, T value, bool mark_as_used = false) {
+ Binding<T>* Add(std::string name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name);
auto binding =
std::make_unique<Binding<T>>(manager_, name, std::move(value));
+ Binding<T>* result = binding.get();
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
+ return result;
}
- void Add(const Identifier* name, T value, bool mark_as_used = false) {
+ Binding<T>* Add(const Identifier* name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name->value);
auto binding =
std::make_unique<Binding<T>>(manager_, name, std::move(value));
+ Binding<T>* result = binding.get();
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
+ return result;
}
std::vector<Binding<T>*> bindings() const {
@@ -433,7 +443,7 @@ class ImplementationVisitor {
public:
void GenerateBuiltinDefinitionsAndInterfaceDescriptors(
const std::string& output_directory);
- void GenerateClassFieldOffsets(const std::string& output_directory);
+ void GenerateVisitorLists(const std::string& output_directory);
void GenerateBitFields(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
diff --git a/chromium/v8/src/torque/kythe-data.cc b/chromium/v8/src/torque/kythe-data.cc
new file mode 100644
index 00000000000..4ef6c2910ab
--- /dev/null
+++ b/chromium/v8/src/torque/kythe-data.cc
@@ -0,0 +1,187 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/kythe-data.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(KytheData)
+
+namespace {
+
+KythePosition MakeKythePosition(const SourcePosition& pos) {
+ KythePosition p;
+ if (pos.source.IsValid()) {
+ p.file_path = SourceFileMap::PathFromV8Root(pos.source);
+ } else {
+ p.file_path = "UNKNOWN";
+ }
+ p.start_offset = pos.start.offset;
+ p.end_offset = pos.end.offset;
+ return p;
+}
+
+} // namespace
+
+// Constants
+kythe_entity_t KytheData::AddConstantDefinition(const Value* constant) {
+ DCHECK(constant->IsNamespaceConstant() || constant->IsExternConstant());
+ KytheData* that = &KytheData::Get();
+ // Check if we know the constant already.
+ auto it = that->constants_.find(constant);
+ if (it != that->constants_.end()) return it->second;
+
+ // Register this constant.
+ KythePosition pos = MakeKythePosition(constant->name()->pos);
+ kythe_entity_t constant_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Constant, constant->name()->value, pos);
+ that->constants_.insert(it, std::make_pair(constant, constant_id));
+ return constant_id;
+}
+
+void KytheData::AddConstantUse(SourcePosition use_position,
+ const Value* constant) {
+ DCHECK(constant->IsNamespaceConstant() || constant->IsExternConstant());
+ KytheData* that = &Get();
+ kythe_entity_t constant_id = AddConstantDefinition(constant);
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Constant, constant_id, use_pos);
+}
+
+// Callables
+kythe_entity_t KytheData::AddFunctionDefinition(Callable* callable) {
+ KytheData* that = &KytheData::Get();
+ // Check if we know the caller already.
+ auto it = that->callables_.find(callable);
+ if (it != that->callables_.end()) return it->second;
+
+ // Register this callable.
+ auto ident_pos = callable->IdentifierPosition();
+ kythe_entity_t callable_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Function, callable->ExternalName(),
+ MakeKythePosition(ident_pos));
+ that->callables_.insert(it, std::make_pair(callable, callable_id));
+ return callable_id;
+}
+
+void KytheData::AddCall(Callable* caller, SourcePosition call_position,
+ Callable* callee) {
+ if (!caller) return; // Ignore those for now.
+ DCHECK_NOT_NULL(caller);
+ DCHECK_NOT_NULL(callee);
+ KytheData* that = &Get();
+ if (call_position.source.IsValid()) {
+ kythe_entity_t caller_id = AddFunctionDefinition(caller);
+ kythe_entity_t callee_id = AddFunctionDefinition(callee);
+
+ KythePosition call_pos = MakeKythePosition(call_position);
+ that->consumer_->AddCall(KytheConsumer::Kind::Function, caller_id, call_pos,
+ callee_id);
+ }
+}
+
+// Class fields
+kythe_entity_t KytheData::AddClassFieldDefinition(const Field* field) {
+ DCHECK(field);
+ KytheData* that = &KytheData::Get();
+ // Check if we know that field already.
+ auto it = that->class_fields_.find(field);
+ if (it != that->class_fields_.end()) return it->second;
+ // Register this field.
+ KythePosition pos = MakeKythePosition(field->pos);
+ kythe_entity_t field_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::ClassField, field->name_and_type.name, pos);
+ that->class_fields_.insert(it, std::make_pair(field, field_id));
+ return field_id;
+}
+
+void KytheData::AddClassFieldUse(SourcePosition use_position,
+ const Field* field) {
+ DCHECK(field);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t field_id = AddClassFieldDefinition(field);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::ClassField, field_id, use_pos);
+}
+
+// Bindings
+kythe_entity_t KytheData::AddBindingDefinition(Binding<LocalValue>* binding) {
+ CHECK(binding);
+ const uint64_t binding_index = binding->unique_index();
+ return AddBindingDefinitionImpl(binding_index, binding->name(),
+ binding->declaration_position());
+}
+
+kythe_entity_t KytheData::AddBindingDefinition(Binding<LocalLabel>* binding) {
+ CHECK(binding);
+ const uint64_t binding_index = binding->unique_index();
+ return AddBindingDefinitionImpl(binding_index, binding->name(),
+ binding->declaration_position());
+}
+
+kythe_entity_t KytheData::AddBindingDefinitionImpl(
+ uint64_t binding_index, const std::string& name,
+ const SourcePosition& ident_pos) {
+ KytheData* that = &KytheData::Get();
+ // Check if we know the binding already.
+ auto it = that->local_bindings_.find(binding_index);
+ if (it != that->local_bindings_.end()) return it->second;
+ // Register this binding.
+ kythe_entity_t binding_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Variable, name, MakeKythePosition(ident_pos));
+ that->local_bindings_.insert(it, std::make_pair(binding_index, binding_id));
+ return binding_id;
+}
+
+void KytheData::AddBindingUse(SourcePosition use_position,
+ Binding<LocalValue>* binding) {
+ CHECK(binding);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t binding_id = AddBindingDefinition(binding);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Variable, binding_id, use_pos);
+}
+
+void KytheData::AddBindingUse(SourcePosition use_position,
+ Binding<LocalLabel>* binding) {
+ CHECK(binding);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t binding_id = AddBindingDefinition(binding);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Variable, binding_id, use_pos);
+}
+
+// Types
+kythe_entity_t KytheData::AddTypeDefinition(const Declarable* type_decl) {
+ CHECK(type_decl);
+ KytheData* that = &KytheData::Get();
+ // Check if we know that type already.
+ auto it = that->types_.find(type_decl);
+ if (it != that->types_.end()) return it->second;
+ // Register this type.
+ KythePosition pos = MakeKythePosition(type_decl->IdentifierPosition());
+ kythe_entity_t type_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Type, type_decl->type_name(), pos);
+ that->types_.insert(it, std::make_pair(type_decl, type_id));
+ return type_id;
+}
+
+void KytheData::AddTypeUse(SourcePosition use_position,
+ const Declarable* type_decl) {
+ CHECK(type_decl);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t type_id = AddTypeDefinition(type_decl);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Type, type_id, use_pos);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
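All of the Add*Definition helpers above follow the same memoize-then-register shape: look the entity up in its per-kind map, return the cached kythe_entity_t if it is already known, otherwise obtain a fresh id from the consumer and cache it with the iterator hint from the failed lookup. A minimal standalone sketch of that pattern (hypothetical names, not part of the patch):

#include <cstdint>
#include <unordered_map>
#include <utility>

using kythe_entity_t = uint64_t;

// Hypothetical memoizing registry mirroring KytheData::Add*Definition.
template <typename Key>
class EntityRegistry {
 public:
  template <typename MakeId>
  kythe_entity_t GetOrAdd(const Key& key, MakeId make_id) {
    auto it = entities_.find(key);
    if (it != entities_.end()) return it->second;  // Already registered.
    kythe_entity_t id = make_id();                 // e.g. consumer_->AddDefinition(...)
    entities_.insert(it, std::make_pair(key, id)); // Reuse the lookup position as a hint.
    return id;
  }

 private:
  std::unordered_map<Key, kythe_entity_t> entities_;
};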
diff --git a/chromium/v8/src/torque/kythe-data.h b/chromium/v8/src/torque/kythe-data.h
new file mode 100644
index 00000000000..ba188419497
--- /dev/null
+++ b/chromium/v8/src/torque/kythe-data.h
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_KYTHE_DATA_H_
+#define V8_TORQUE_KYTHE_DATA_H_
+
+#include <map>
+
+#include "src/torque/ast.h"
+#include "src/torque/contextual.h"
+#include "src/torque/global-context.h"
+#include "src/torque/implementation-visitor.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+struct KythePosition {
+ std::string file_path;
+ uint64_t start_offset;
+ uint64_t end_offset;
+};
+
+using kythe_entity_t = uint64_t;
+
+class KytheConsumer {
+ public:
+ enum class Kind {
+ Unspecified,
+ Constant,
+ Function,
+ ClassField,
+ Variable,
+ Type,
+ };
+
+ virtual ~KytheConsumer() = 0;
+
+ virtual kythe_entity_t AddDefinition(Kind kind, std::string name,
+ KythePosition pos) = 0;
+
+ virtual void AddUse(Kind kind, kythe_entity_t entity,
+ KythePosition use_pos) = 0;
+ virtual void AddCall(Kind kind, kythe_entity_t caller_entity,
+ KythePosition call_pos,
+ kythe_entity_t callee_entity) = 0;
+};
+inline KytheConsumer::~KytheConsumer() = default;
+
+class KytheData : public ContextualClass<KytheData> {
+ public:
+ KytheData() = default;
+
+ static void SetConsumer(KytheConsumer* consumer) {
+ Get().consumer_ = consumer;
+ }
+
+ // Constants
+ V8_EXPORT_PRIVATE static kythe_entity_t AddConstantDefinition(
+ const Value* constant);
+ V8_EXPORT_PRIVATE static void AddConstantUse(SourcePosition use_position,
+ const Value* constant);
+ // Callables
+ V8_EXPORT_PRIVATE static kythe_entity_t AddFunctionDefinition(
+ Callable* callable);
+ V8_EXPORT_PRIVATE static void AddCall(Callable* caller,
+ SourcePosition call_position,
+ Callable* callee);
+ // Class fields
+ V8_EXPORT_PRIVATE static kythe_entity_t AddClassFieldDefinition(
+ const Field* field);
+ V8_EXPORT_PRIVATE static void AddClassFieldUse(SourcePosition use_position,
+ const Field* field);
+ // Bindings
+ V8_EXPORT_PRIVATE static kythe_entity_t AddBindingDefinition(
+ Binding<LocalValue>* binding);
+ V8_EXPORT_PRIVATE static kythe_entity_t AddBindingDefinition(
+ Binding<LocalLabel>* binding);
+ V8_EXPORT_PRIVATE static void AddBindingUse(SourcePosition use_position,
+ Binding<LocalValue>* binding);
+ V8_EXPORT_PRIVATE static void AddBindingUse(SourcePosition use_position,
+ Binding<LocalLabel>* binding);
+
+ // Types
+ V8_EXPORT_PRIVATE static kythe_entity_t AddTypeDefinition(
+ const Declarable* type_decl);
+ V8_EXPORT_PRIVATE static void AddTypeUse(SourcePosition use_position,
+ const Declarable* type_decl);
+
+ private:
+ static kythe_entity_t AddBindingDefinitionImpl(
+ uint64_t binding_index, const std::string& name,
+ const SourcePosition& ident_pos);
+
+ KytheConsumer* consumer_;
+ std::unordered_map<const Value*, kythe_entity_t> constants_;
+ std::unordered_map<Callable*, kythe_entity_t> callables_;
+
+ std::unordered_map<const Field*, std::set<SourcePosition>> field_uses_;
+ std::unordered_map<uint64_t, kythe_entity_t> local_bindings_;
+ std::unordered_map<const Declarable*, kythe_entity_t> types_;
+ std::unordered_map<const Field*, kythe_entity_t> class_fields_;
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_KYTHE_DATA_H_
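KytheConsumer above is the single integration point a host indexer provides: Torque reports definitions, uses and calls through it and treats the returned kythe_entity_t values as opaque. A minimal sketch of a consumer that just numbers entities and logs events; the class name and output format are hypothetical, only the overridden signatures come from the header above:

#include <cstdint>
#include <iostream>
#include <string>

#include "src/torque/kythe-data.h"

namespace {

using v8::internal::torque::KytheConsumer;
using v8::internal::torque::KythePosition;
using v8::internal::torque::kythe_entity_t;

// Hypothetical consumer: assigns sequential ids and logs every event.
class LoggingKytheConsumer : public KytheConsumer {
 public:
  kythe_entity_t AddDefinition(Kind kind, std::string name,
                               KythePosition pos) override {
    kythe_entity_t id = next_id_++;
    std::cout << "def #" << id << " " << name << " @ " << pos.file_path << ":"
              << pos.start_offset << "-" << pos.end_offset << "\n";
    return id;
  }
  void AddUse(Kind kind, kythe_entity_t entity,
              KythePosition use_pos) override {
    std::cout << "use of #" << entity << " @ " << use_pos.file_path << ":"
              << use_pos.start_offset << "\n";
  }
  void AddCall(Kind kind, kythe_entity_t caller_entity, KythePosition call_pos,
               kythe_entity_t callee_entity) override {
    std::cout << "call #" << caller_entity << " -> #" << callee_entity << "\n";
  }

 private:
  kythe_entity_t next_id_ = 1;
};

}  // namespace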
diff --git a/chromium/v8/src/torque/ls/message-handler.cc b/chromium/v8/src/torque/ls/message-handler.cc
index e9f2224df76..66995c0c89a 100644
--- a/chromium/v8/src/torque/ls/message-handler.cc
+++ b/chromium/v8/src/torque/ls/message-handler.cc
@@ -279,8 +279,9 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
return;
}
- LineAndColumn pos{request.params().position().line(),
- request.params().position().character()};
+ auto pos =
+ LineAndColumn::WithUnknownOffset(request.params().position().line(),
+ request.params().position().character());
if (auto maybe_definition = LanguageServerData::FindDefinition(id, pos)) {
SourcePosition definition = *maybe_definition;
@@ -311,22 +312,22 @@ void HandleDocumentSymbolRequest(DocumentSymbolRequest request,
DCHECK(symbol->IsUserDefined());
if (symbol->IsMacro()) {
Macro* macro = Macro::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(macro->ReadableName());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(macro->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(macro->ReadableName());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(macro->Position());
} else if (symbol->IsBuiltin()) {
Builtin* builtin = Builtin::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(builtin->ReadableName());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(builtin->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(builtin->ReadableName());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(builtin->Position());
} else if (symbol->IsGenericCallable()) {
GenericCallable* generic = GenericCallable::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(generic->name());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(generic->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(generic->name());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(generic->Position());
} else if (symbol->IsTypeAlias()) {
const Type* type = TypeAlias::cast(symbol)->type();
SymbolKind kind =
diff --git a/chromium/v8/src/torque/source-positions.h b/chromium/v8/src/torque/source-positions.h
index 857efa22261..f953417fd34 100644
--- a/chromium/v8/src/torque/source-positions.h
+++ b/chromium/v8/src/torque/source-positions.h
@@ -30,16 +30,27 @@ class SourceId {
};
struct LineAndColumn {
+ static constexpr int kUnknownOffset = -1;
+
+ int offset;
int line;
int column;
- static LineAndColumn Invalid() { return {-1, -1}; }
+ static LineAndColumn Invalid() { return {-1, -1, -1}; }
+ static LineAndColumn WithUnknownOffset(int line, int column) {
+ return {kUnknownOffset, line, column};
+ }
bool operator==(const LineAndColumn& other) const {
- return line == other.line && column == other.column;
+ if (offset == kUnknownOffset || other.offset == kUnknownOffset) {
+ return line == other.line && column == other.column;
+ }
+ DCHECK_EQ(offset == other.offset,
+ line == other.line && column == other.column);
+ return offset == other.offset;
}
bool operator!=(const LineAndColumn& other) const {
- return !(*this == other);
+ return !operator==(other);
}
};
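With the added offset field, two positions are compared by offset whenever both sides carry one; the comparison falls back to line and column only when either side was built via WithUnknownOffset, which is what the language-server path above still does. A small illustration of the intended semantics (hypothetical values, assumes the v8::internal::torque namespace):

// Hypothetical check against the struct defined above.
void LineAndColumnEqualityExample() {
  LineAndColumn from_lexer{/*offset=*/120, /*line=*/5, /*column=*/7};
  LineAndColumn from_lsp =
      LineAndColumn::WithUnknownOffset(/*line=*/5, /*column=*/7);
  // One side has kUnknownOffset, so line/column decide: equal.
  bool equal_without_offset = (from_lexer == from_lsp);
  // Both offsets known: the offsets alone decide (the DCHECK asserts they
  // stay consistent with line/column).
  bool equal_with_offset =
      (from_lexer == LineAndColumn{/*offset=*/120, /*line=*/5, /*column=*/7});
  (void)equal_without_offset;
  (void)equal_with_offset;
}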
diff --git a/chromium/v8/src/torque/torque-compiler.cc b/chromium/v8/src/torque/torque-compiler.cc
index 64bad91cab8..579494f8451 100644
--- a/chromium/v8/src/torque/torque-compiler.cc
+++ b/chromium/v8/src/torque/torque-compiler.cc
@@ -50,6 +50,9 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
if (options.collect_language_server_data) {
GlobalContext::SetCollectLanguageServerData();
}
+ if (options.collect_kythe_data) {
+ GlobalContext::SetCollectKytheData();
+ }
if (options.force_assert_statements) {
GlobalContext::SetForceAssertStatements();
}
@@ -87,7 +90,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateBuiltinDefinitionsAndInterfaceDescriptors(
output_directory);
- implementation_visitor.GenerateClassFieldOffsets(output_directory);
+ implementation_visitor.GenerateVisitorLists(output_directory);
implementation_visitor.GenerateBitFields(output_directory);
implementation_visitor.GeneratePrintDefinitions(output_directory);
implementation_visitor.GenerateClassDefinitions(output_directory);
@@ -161,6 +164,38 @@ TorqueCompilerResult CompileTorque(std::vector<std::string> files,
return result;
}
+TorqueCompilerResult CompileTorqueForKythe(
+ std::vector<TorqueCompilationUnit> units, TorqueCompilerOptions options,
+ KytheConsumer* consumer) {
+ SourceFileMap::Scope source_map_scope(options.v8_root);
+ CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
+ CurrentAst::Scope ast_scope;
+ TorqueMessages::Scope messages_scope;
+ LanguageServerData::Scope server_data_scope;
+ KytheData::Scope kythe_scope;
+
+ KytheData::Get().SetConsumer(consumer);
+
+ TorqueCompilerResult result;
+ try {
+ for (const auto& unit : units) {
+ SourceId source_id = SourceFileMap::AddSource(unit.source_file_path);
+ CurrentSourceFile::Scope source_id_scope(source_id);
+ ParseTorque(unit.file_content);
+ }
+ CompileCurrentAst(options);
+ } catch (TorqueAbortCompilation&) {
+ // Do nothing. The relevant TorqueMessage is part of the
+ // TorqueMessages contextual.
+ }
+
+ result.source_file_map = SourceFileMap::Get();
+ result.language_server_data = std::move(LanguageServerData::Get());
+ result.messages = std::move(TorqueMessages::Get());
+
+ return result;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/torque/torque-compiler.h b/chromium/v8/src/torque/torque-compiler.h
index 32fa41ea9b0..816e42f1daa 100644
--- a/chromium/v8/src/torque/torque-compiler.h
+++ b/chromium/v8/src/torque/torque-compiler.h
@@ -7,6 +7,7 @@
#include "src/torque/ast.h"
#include "src/torque/contextual.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
@@ -19,9 +20,10 @@ struct TorqueCompilerOptions {
std::string output_directory = "";
std::string v8_root = "";
bool collect_language_server_data = false;
+ bool collect_kythe_data = false;
- // assert(...) are only generated for debug builds. The provide
- // language server support for statements inside asserts, this flag
+ // dcheck(...) are only generated for debug builds. To provide
+ // language server support for statements inside dchecks, this flag
// can force generate them.
bool force_assert_statements = false;
@@ -52,10 +54,18 @@ struct TorqueCompilerResult {
std::vector<TorqueMessage> messages;
};
+struct TorqueCompilationUnit {
+ std::string source_file_path;
+ std::string file_content;
+};
+
V8_EXPORT_PRIVATE TorqueCompilerResult
CompileTorque(const std::string& source, TorqueCompilerOptions options);
TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerOptions options);
+V8_EXPORT_PRIVATE TorqueCompilerResult CompileTorqueForKythe(
+ std::vector<TorqueCompilationUnit> units, TorqueCompilerOptions options,
+ KytheConsumer* kythe_consumer);
} // namespace torque
} // namespace internal
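CompileTorqueForKythe differs from the file-based CompileTorque overloads in that the caller passes the source text directly, one TorqueCompilationUnit per .tq file, together with the consumer that should receive the indexing data; the caller presumably also sets collect_kythe_data so that CompileCurrentAst enables the Kythe hooks. A hedged usage sketch, assuming a consumer like the LoggingKytheConsumer outlined after kythe-data.h and a hypothetical ReadFileSomehow helper:

#include <string>
#include <vector>

#include "src/torque/torque-compiler.h"

// Hypothetical helper that loads a .tq file into memory.
std::string ReadFileSomehow(const std::string& path);

void IndexTorqueSources(v8::internal::torque::KytheConsumer* consumer) {
  using namespace v8::internal::torque;

  std::vector<TorqueCompilationUnit> units;
  units.push_back({"src/builtins/array.tq",
                   ReadFileSomehow("src/builtins/array.tq")});

  TorqueCompilerOptions options;
  options.v8_root = ".";
  options.collect_kythe_data = true;  // Enables the KytheData hooks.

  TorqueCompilerResult result = CompileTorqueForKythe(units, options, consumer);
  for (const TorqueMessage& message : result.messages) {
    // Report compile errors from the indexed sources, if any.
    (void)message;
  }
}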
diff --git a/chromium/v8/src/torque/torque-parser.cc b/chromium/v8/src/torque/torque-parser.cc
index 8320b62337b..542e3a42a4b 100644
--- a/chromium/v8/src/torque/torque-parser.cc
+++ b/chromium/v8/src/torque/torque-parser.cc
@@ -32,7 +32,7 @@ struct ExpressionWithSource {
struct TypeswitchCase {
SourcePosition pos;
- base::Optional<std::string> name;
+ base::Optional<Identifier*> name;
TypeExpression* type;
Statement* block;
};
@@ -313,9 +313,10 @@ void CheckNotDeferredStatement(Statement* statement) {
TypeExpression* AddConstexpr(TypeExpression* type) {
BasicTypeExpression* basic = BasicTypeExpression::DynamicCast(type);
if (!basic) Error("Unsupported extends clause.").Throw();
- return MakeNode<BasicTypeExpression>(basic->namespace_qualification,
- CONSTEXPR_TYPE_PREFIX + basic->name,
- basic->generic_arguments);
+ return MakeNode<BasicTypeExpression>(
+ basic->namespace_qualification,
+ MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + basic->name->value),
+ basic->generic_arguments);
}
Expression* MakeCall(IdentifierExpression* callee,
@@ -327,7 +328,7 @@ Expression* MakeCall(IdentifierExpression* callee,
// All IdentifierExpressions are treated as label names and can be directly
// used as labels identifiers. All other statements in a call's otherwise
// must create intermediate Labels for the otherwise's statement code.
- size_t label_id = 0;
+ size_t label_id_count = 0;
std::vector<TryHandler*> temp_labels;
for (auto* statement : otherwise) {
if (auto* e = ExpressionStatement::DynamicCast(statement)) {
@@ -339,7 +340,7 @@ Expression* MakeCall(IdentifierExpression* callee,
continue;
}
}
- auto label_name = std::string("__label") + std::to_string(label_id++);
+ auto label_name = std::string("__label") + std::to_string(label_id_count++);
auto label_id = MakeNode<Identifier>(label_name);
label_id->pos = SourcePosition::Invalid();
labels.push_back(label_id);
@@ -382,12 +383,11 @@ base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
auto this_arg = child_results->NextAs<Expression*>();
- auto callee = child_results->NextAs<std::string>();
+ auto callee = child_results->NextAs<Identifier*>();
auto args = child_results->NextAs<std::vector<Expression*>>();
auto otherwise = child_results->NextAs<std::vector<Statement*>>();
- return ParseResult{
- MakeCall(MakeNode<IdentifierExpression>(MakeNode<Identifier>(callee)),
- this_arg, args, otherwise)};
+ return ParseResult{MakeCall(MakeNode<IdentifierExpression>(callee), this_arg,
+ args, otherwise)};
}
base::Optional<ParseResult> MakeNewExpression(
@@ -501,8 +501,8 @@ base::Optional<ParseResult> MakeAssertStatement(
auto kind_string = child_results->NextAs<Identifier*>()->value;
auto expr_with_source = child_results->NextAs<ExpressionWithSource>();
AssertStatement::AssertKind kind;
- if (kind_string == "assert") {
- kind = AssertStatement::AssertKind::kAssert;
+ if (kind_string == "dcheck") {
+ kind = AssertStatement::AssertKind::kDcheck;
} else if (kind_string == "check") {
kind = AssertStatement::AssertKind::kCheck;
} else if (kind_string == "static_assert") {
@@ -523,12 +523,6 @@ base::Optional<ParseResult> MakeDebugStatement(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
- TypeExpression* result = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, "void", std::vector<TypeExpression*>{});
- return ParseResult{result};
-}
-
base::Optional<ParseResult> MakeExternalMacro(
ParseResultIterator* child_results) {
auto transitioning = child_results->NextAs<bool>();
@@ -728,7 +722,7 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
constexpr_name, flags | AbstractTypeFlag::kConstexpr, constexpr_extends,
constexpr_generates);
constexpr_decl->pos = name->pos;
- Declaration* decl = constexpr_decl;
+ decl = constexpr_decl;
if (!generic_parameters.empty()) {
decl =
MakeNode<GenericTypeDeclaration>(generic_parameters, constexpr_decl);
@@ -887,8 +881,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
- {ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
- ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
+ {ANNOTATION_ABSTRACT, ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
ANNOTATION_DO_NOT_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
@@ -897,10 +890,6 @@ base::Optional<ParseResult> MakeClassDeclaration(
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
ANNOTATION_INSTANCE_TYPE_VALUE});
ClassFlags flags = ClassFlag::kNone;
- bool generate_print = annotations.Contains(ANNOTATION_GENERATE_PRINT);
- if (generate_print) flags |= ClassFlag::kGeneratePrint;
- bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
- if (generate_verify) flags |= ClassFlag::kGenerateVerify;
if (annotations.Contains(ANNOTATION_ABSTRACT)) {
flags |= ClassFlag::kAbstract;
}
@@ -1009,16 +998,15 @@ base::Optional<ParseResult> MakeClassDeclaration(
(flags & ClassFlag::kIsShape) == 0) {
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>("obj"));
- parameters.types.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "HeapObject",
- std::vector<TypeExpression*>{}));
+ parameters.types.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>("HeapObject"),
+ std::vector<TypeExpression*>{}));
LabelAndTypesVector labels;
labels.push_back(LabelAndTypes{MakeNode<Identifier>("CastError"),
std::vector<TypeExpression*>{}});
- TypeExpression* class_type =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
- std::vector<TypeExpression*>{});
+ TypeExpression* class_type = MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, name, std::vector<TypeExpression*>{});
std::vector<std::string> namespace_qualification{
TORQUE_INTERNAL_NAMESPACE_STRING};
@@ -1043,9 +1031,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
auto cast_body = MakeNode<ReturnStatement>(value);
std::vector<TypeExpression*> generic_parameters;
- generic_parameters.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
- std::vector<TypeExpression*>{}));
+ generic_parameters.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, name, std::vector<TypeExpression*>{}));
Declaration* specialization = MakeNode<SpecializationDeclaration>(
false, MakeNode<Identifier>("Cast"), generic_parameters,
@@ -1194,7 +1181,8 @@ base::Optional<ParseResult> MakeBasicTypeExpression(
child_results->NextAs<std::vector<TypeExpression*>>();
TypeExpression* result = MakeNode<BasicTypeExpression>(
std::move(namespace_qualification),
- is_constexpr ? GetConstexprName(name) : std::move(name),
+ MakeNode<Identifier>(is_constexpr ? GetConstexprName(name)
+ : std::move(name)),
std::move(generic_arguments));
return ParseResult{result};
}
@@ -1217,7 +1205,8 @@ base::Optional<ParseResult> MakeReferenceTypeExpression(
std::vector<TypeExpression*> generic_arguments{referenced_type};
TypeExpression* result = MakeNode<BasicTypeExpression>(
namespace_qualification,
- is_const ? CONST_REFERENCE_TYPE_STRING : MUTABLE_REFERENCE_TYPE_STRING,
+ MakeNode<Identifier>(is_const ? CONST_REFERENCE_TYPE_STRING
+ : MUTABLE_REFERENCE_TYPE_STRING),
generic_arguments);
return ParseResult{result};
}
@@ -1315,7 +1304,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
base::nullopt);
TypeExpression* name_type_expression =
- MakeNode<BasicTypeExpression>(name_identifier->value);
+ MakeNode<BasicTypeExpression>(name_identifier);
name_type_expression->pos = name_identifier->pos;
std::vector<Declaration*> entry_decls;
@@ -1346,7 +1335,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
entry.type.value_or(*base_type_expression), base::nullopt));
auto entry_type = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{name}, entry.name->value,
+ std::vector<std::string>{name}, entry.name,
std::vector<TypeExpression*>{});
if (union_type) {
union_type = MakeNode<UnionTypeExpression>(union_type, entry_type);
@@ -1374,7 +1363,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
Identifier* constexpr_type_identifier =
MakeNode<Identifier>(std::string(CONSTEXPR_TYPE_PREFIX) + name);
TypeExpression* constexpr_type_expression = MakeNode<BasicTypeExpression>(
- std::string(CONSTEXPR_TYPE_PREFIX) + name);
+ MakeNode<Identifier>(std::string(CONSTEXPR_TYPE_PREFIX) + name));
base::Optional<TypeExpression*> base_constexpr_type_expression =
base::nullopt;
if (base_type_expression) {
@@ -1390,8 +1379,9 @@ base::Optional<ParseResult> MakeEnumDeclaration(
Statement* fromconstexpr_body = nullptr;
if (generate_nonconstexpr) {
DCHECK(base_type_expression.has_value());
- type_expr = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, name, std::vector<TypeExpression*>{});
+ type_expr = MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>(name),
+ std::vector<TypeExpression*>{});
// return %RawDownCast<Enum>(%FromConstexpr<Base>(o)))
fromconstexpr_identifier = MakeNode<Identifier>("FromConstexpr");
@@ -1440,9 +1430,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
MakeNode<Identifier>("constexpr constant " + entry_name);
entry_decls.push_back(MakeNode<ExternConstDeclaration>(
constexpr_constant_name,
- MakeNode<BasicTypeExpression>(std::vector<std::string>{},
- entry_constexpr_type,
- std::vector<TypeExpression*>{}),
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{}),
constexpr_generates + "::" + entry_name));
entry_decls.push_back(MakeNode<ConstDeclaration>(
entry.name, *entry.type,
@@ -1461,9 +1452,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// }
entry_decls.push_back(MakeNode<ExternConstDeclaration>(
entry.name,
- MakeNode<BasicTypeExpression>(std::vector<std::string>{},
- entry_constexpr_type,
- std::vector<TypeExpression*>{}),
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{}),
constexpr_generates + "::" + entry_name));
}
@@ -1471,9 +1463,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// : Enum::constexpr kEntry0): Enum
if (generate_nonconstexpr) {
TypeExpression* entry_constexpr_type_expr =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{name},
- entry_constexpr_type,
- std::vector<TypeExpression*>{});
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{name},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{});
ParameterList parameters;
parameters.names.push_back(fromconstexpr_parameter_identifier);
@@ -1498,7 +1491,7 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
ParseResultIterator* child_results) {
auto expression = child_results->NextAs<Expression*>();
auto cases = child_results->NextAs<std::vector<TypeswitchCase>>();
- CurrentSourcePosition::Scope current_source_position(
+ CurrentSourcePosition::Scope matched_input_current_source_position(
child_results->matched_input().pos);
// typeswitch (expression) case (x1 : T1) {
@@ -1556,10 +1549,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
} else {
case_block = current_block;
}
- std::string name = "__case_value";
+ Identifier* name =
+ cases[i].name ? *cases[i].name : MakeNode<Identifier>("__case_value");
if (cases[i].name) name = *cases[i].name;
- case_block->statements.push_back(MakeNode<VarDeclarationStatement>(
- true, MakeNode<Identifier>(name), cases[i].type, value));
+ case_block->statements.push_back(
+ MakeNode<VarDeclarationStatement>(true, name, cases[i].type, value));
case_block->statements.push_back(cases[i].block);
if (i < cases.size() - 1) {
BlockStatement* next_block = MakeNode<BlockStatement>();
@@ -1580,11 +1574,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
base::Optional<ParseResult> MakeTypeswitchCase(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<base::Optional<std::string>>();
+ auto name = child_results->NextAs<base::Optional<Identifier*>>();
auto type = child_results->NextAs<TypeExpression*>();
auto block = child_results->NextAs<Statement*>();
- return ParseResult{TypeswitchCase{child_results->matched_input().pos,
- std::move(name), type, block}};
+ return ParseResult{
+ TypeswitchCase{child_results->matched_input().pos, name, type, block}};
}
base::Optional<ParseResult> MakeWhileStatement(
@@ -1722,7 +1716,8 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>(variable));
parameters.types.push_back(MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, "JSAny", std::vector<TypeExpression*>{}));
+ std::vector<std::string>{}, MakeNode<Identifier>("JSAny"),
+ std::vector<TypeExpression*>{}));
parameters.has_varargs = false;
TryHandler* result = MakeNode<TryHandler>(
TryHandler::HandlerKind::kCatch, MakeNode<Identifier>(kCatchLabelName),
@@ -1964,11 +1959,9 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
- {ANNOTATION_NO_VERIFIER, ANNOTATION_CPP_RELAXED_STORE,
- ANNOTATION_CPP_RELAXED_LOAD, ANNOTATION_CPP_RELEASE_STORE,
- ANNOTATION_CPP_ACQUIRE_LOAD},
+ {ANNOTATION_CPP_RELAXED_STORE, ANNOTATION_CPP_RELAXED_LOAD,
+ ANNOTATION_CPP_RELEASE_STORE, ANNOTATION_CPP_ACQUIRE_LOAD},
{ANNOTATION_IF, ANNOTATION_IFNOT});
- bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
FieldSynchronization write_synchronization = FieldSynchronization::kNone;
if (annotations.Contains(ANNOTATION_CPP_RELEASE_STORE)) {
write_synchronization = FieldSynchronization::kAcquireRelease;
@@ -2020,7 +2013,6 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
std::move(conditions),
weak,
const_qualified,
- generate_verify,
read_synchronization,
write_synchronization}};
}
@@ -2270,10 +2262,6 @@ struct TorqueGrammar : Grammar {
TryOrDefault<TypeList>(Sequence({Token("("), typeList, Token(")")}))},
MakeLabelAndTypes)};
- // Result: TypeExpression*
- Symbol optionalReturnType = {Rule({Token(":"), &type}),
- Rule({}, MakeVoidType)};
-
// Result: LabelAndTypesVector
Symbol* optionalLabelList{TryOrDefault<LabelAndTypesVector>(
Sequence({Token("labels"),
@@ -2364,10 +2352,9 @@ struct TorqueGrammar : Grammar {
{&identifierExpression, &argumentList, optionalOtherwise}, MakeCall)};
// Result: Expression*
- Symbol callMethodExpression = {
- Rule({&primaryExpression, Token("."), &identifier, &argumentList,
- optionalOtherwise},
- MakeMethodCall)};
+ Symbol callMethodExpression = {Rule(
+ {&primaryExpression, Token("."), &name, &argumentList, optionalOtherwise},
+ MakeMethodCall)};
// Result: NameAndExpression
Symbol namedExpression = {
@@ -2555,7 +2542,7 @@ struct TorqueGrammar : Grammar {
MakeTypeswitchStatement),
Rule({Token("try"), &block, List<TryHandler*>(&tryHandler)},
MakeTryLabelExpression),
- Rule({OneOf({"assert", "check", "static_assert"}), Token("("),
+ Rule({OneOf({"dcheck", "check", "static_assert"}), Token("("),
&expressionWithSource, Token(")"), Token(";")},
MakeAssertStatement),
Rule({Token("while"), Token("("), expression, Token(")"), &statement},
@@ -2569,7 +2556,7 @@ struct TorqueGrammar : Grammar {
// Result: TypeswitchCase
Symbol typeswitchCase = {
Rule({Token("case"), Token("("),
- Optional<std::string>(Sequence({&identifier, Token(":")})), &type,
+ Optional<Identifier*>(Sequence({&name, Token(":")})), &type,
Token(")"), Token(":"), &block},
MakeTypeswitchCase)};
@@ -2582,7 +2569,7 @@ struct TorqueGrammar : Grammar {
Symbol method = {Rule(
{CheckIf(Token("transitioning")),
Optional<std::string>(Sequence({Token("operator"), &externalString})),
- Token("macro"), &name, &parameterListNoVararg, &optionalReturnType,
+ Token("macro"), &name, &parameterListNoVararg, Token(":"), &type,
optionalLabelList, &block},
MakeMethodDeclaration)};
@@ -2629,7 +2616,7 @@ struct TorqueGrammar : Grammar {
AsSingletonVector<Declaration*, MakeTypeAliasDeclaration>()),
Rule({Token("intrinsic"), &intrinsicName,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListNoVararg, &optionalReturnType, &optionalBody},
+ &parameterListNoVararg, Token(":"), &type, &optionalBody},
AsSingletonVector<Declaration*, MakeIntrinsicDeclaration>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
Optional<std::string>(
@@ -2637,33 +2624,33 @@ struct TorqueGrammar : Grammar {
Token("macro"),
Optional<std::string>(Sequence({&identifier, Token("::")})), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
+ &typeListMaybeVarArgs, Token(":"), &type, optionalLabelList,
Token(";")},
AsSingletonVector<Declaration*, MakeExternalMacro>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
CheckIf(Token("javascript")), Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ &typeListMaybeVarArgs, Token(":"), &type, Token(";")},
AsSingletonVector<Declaration*, MakeExternalBuiltin>()),
Rule({Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
- &name, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ &name, &typeListMaybeVarArgs, Token(":"), &type, Token(";")},
AsSingletonVector<Declaration*, MakeExternalRuntime>()),
Rule({annotations, CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
Token("macro"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListNoVararg, &optionalReturnType, optionalLabelList,
+ &parameterListNoVararg, Token(":"), &type, optionalLabelList,
&optionalBody},
AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListAllowVararg, &optionalReturnType, &optionalBody},
+ &parameterListAllowVararg, Token(":"), &type, &optionalBody},
AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),
Rule({CheckIf(Token("transitioning")), &name,
&genericSpecializationTypeList, &parameterListAllowVararg,
- &optionalReturnType, optionalLabelList, &block},
+ Token(":"), &type, optionalLabelList, &block},
AsSingletonVector<Declaration*, MakeSpecializationDeclaration>()),
Rule({Token("#include"), &externalString},
AsSingletonVector<Declaration*, MakeCppIncludeDeclaration>()),
diff --git a/chromium/v8/src/torque/type-inference.cc b/chromium/v8/src/torque/type-inference.cc
index 612d9edb07f..3cffa73ce90 100644
--- a/chromium/v8/src/torque/type-inference.cc
+++ b/chromium/v8/src/torque/type-inference.cc
@@ -61,7 +61,7 @@ void TypeArgumentInference::Match(TypeExpression* parameter,
BasicTypeExpression::DynamicCast(parameter)) {
// If the parameter is referring to one of the type parameters, substitute
if (basic->namespace_qualification.empty() && !basic->is_constexpr) {
- auto result = type_parameter_from_name_.find(basic->name);
+ auto result = type_parameter_from_name_.find(basic->name->value);
if (result != type_parameter_from_name_.end()) {
size_t type_parameter_index = result->second;
if (type_parameter_index < num_explicit_) {
@@ -92,7 +92,7 @@ void TypeArgumentInference::Match(TypeExpression* parameter,
void TypeArgumentInference::MatchGeneric(BasicTypeExpression* parameter,
const Type* argument_type) {
QualifiedName qualified_name{parameter->namespace_qualification,
- parameter->name};
+ parameter->name->value};
GenericType* generic_type =
Declarations::LookupUniqueGenericType(qualified_name);
auto& specialized_from = argument_type->GetSpecializedFrom();
diff --git a/chromium/v8/src/torque/type-visitor.cc b/chromium/v8/src/torque/type-visitor.cc
index 3b94d6a512f..d7b107dbe3a 100644
--- a/chromium/v8/src/torque/type-visitor.cc
+++ b/chromium/v8/src/torque/type-visitor.cc
@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-oracle.h"
@@ -117,7 +118,10 @@ void DeclareMethods(AggregateType* container_type,
signature.parameter_types.types.insert(
signature.parameter_types.types.begin() + signature.implicit_count,
container_type);
- Declarations::CreateMethod(container_type, method_name, signature, body);
+ Method* m = Declarations::CreateMethod(container_type, method_name,
+ signature, body);
+ m->SetPosition(method->pos);
+ m->SetIdentifierPosition(method->name->pos);
}
}
@@ -189,7 +193,7 @@ const StructType* TypeVisitor::ComputeType(
StructDeclaration* decl, MaybeSpecializationKey specialized_from) {
StructType* struct_type = TypeOracle::GetStructType(decl, specialized_from);
CurrentScope::Scope struct_namespace_scope(struct_type->nspace());
- CurrentSourcePosition::Scope position_activator(decl->pos);
+ CurrentSourcePosition::Scope decl_position_activator(decl->pos);
ResidueClass offset = 0;
for (auto& field : decl->fields) {
@@ -207,7 +211,6 @@ const StructType* TypeVisitor::ComputeType(
offset.SingleValue(),
false,
field.const_qualified,
- false,
FieldSynchronization::kNone,
FieldSynchronization::kNone};
auto optional_size = SizeOf(f.name_and_type.type);
@@ -315,7 +318,6 @@ const ClassType* TypeVisitor::ComputeType(
Error("non-external classes must have defined layouts");
}
}
- flags = flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify;
}
if (!(flags & ClassFlag::kExtern) &&
(flags & ClassFlag::kHasSameInstanceTypeAsParent)) {
@@ -334,7 +336,8 @@ const ClassType* TypeVisitor::ComputeType(
const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
- QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ QualifiedName qualified_name{basic->namespace_qualification,
+ basic->name->value};
auto& args = basic->generic_arguments;
const Type* type;
SourcePosition pos = SourcePosition::Invalid();
@@ -343,12 +346,20 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
auto* alias = Declarations::LookupTypeAlias(qualified_name);
type = alias->type();
pos = alias->GetDeclarationPosition();
+ if (GlobalContext::collect_kythe_data()) {
+ if (alias->IsUserDefined()) {
+ KytheData::AddTypeUse(basic->name->pos, alias);
+ }
+ }
} else {
auto* generic_type =
Declarations::LookupUniqueGenericType(qualified_name);
type = TypeOracle::GetGenericTypeInstance(generic_type,
ComputeTypeVector(args));
pos = generic_type->declaration()->name->pos;
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddTypeUse(basic->name->pos, generic_type);
+ }
}
if (GlobalContext::collect_language_server_data()) {
@@ -429,7 +440,6 @@ void TypeVisitor::VisitClassFieldsAndMethods(
class_offset.SingleValue(),
field_expression.weak,
field_expression.const_qualified,
- field_expression.generate_verify,
field_expression.read_synchronization,
field_expression.write_synchronization});
ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
@@ -482,7 +492,8 @@ const Type* TypeVisitor::ComputeTypeForStructExpression(
ReportError("expected basic type expression referring to struct");
}
- QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ QualifiedName qualified_name{basic->namespace_qualification,
+ basic->name->value};
base::Optional<GenericType*> maybe_generic_type =
Declarations::TryLookupGenericType(qualified_name);
diff --git a/chromium/v8/src/torque/types.cc b/chromium/v8/src/torque/types.cc
index 5ea7fe73caf..9157268bcb3 100644
--- a/chromium/v8/src/torque/types.cc
+++ b/chromium/v8/src/torque/types.cc
@@ -818,10 +818,10 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
// );
// }
//
- // If the field has an unknown offset, and the previous field is named p, and
- // an item in the previous field has size 4:
+ // If the field has an unknown offset, and the previous field is named p, is
+ // not const, and is of type PType with size 4:
// FieldSliceClassNameFieldName(o: ClassName) {
- // const previous = %FieldSlice<ClassName>(o, "p");
+ // const previous = %FieldSlice<ClassName, MutableSlice<PType>>(o, "p");
// return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
// /*object:*/ o,
// /*offset:*/ previous.offset + 4 * previous.length,
@@ -853,14 +853,21 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
const Field* previous = GetFieldPreceding(field_index);
DCHECK_NOT_NULL(previous);
- // %FieldSlice<ClassName>(o, "p")
+ const Type* previous_slice_type =
+ previous->const_qualified
+ ? TypeOracle::GetConstSliceType(previous->name_and_type.type)
+ : TypeOracle::GetMutableSliceType(previous->name_and_type.type);
+
+ // %FieldSlice<ClassName, MutableSlice<PType>>(o, "p")
Expression* previous_expression = MakeCallExpression(
- MakeIdentifierExpression({"torque_internal"}, "%FieldSlice",
- {MakeNode<PrecomputedTypeExpression>(this)}),
+ MakeIdentifierExpression(
+ {"torque_internal"}, "%FieldSlice",
+ {MakeNode<PrecomputedTypeExpression>(this),
+ MakeNode<PrecomputedTypeExpression>(previous_slice_type)}),
{parameter, MakeNode<StringLiteralExpression>(
StringLiteralQuote(previous->name_and_type.name))});
- // const previous = %FieldSlice<ClassName>(o, "p");
+ // const previous = %FieldSlice<ClassName, MutableSlice<PType>>(o, "p");
Statement* define_previous =
MakeConstDeclarationStatement("previous", previous_expression);
statements.push_back(define_previous);
diff --git a/chromium/v8/src/torque/types.h b/chromium/v8/src/torque/types.h
index e231fb9431d..d14dfaf7b20 100644
--- a/chromium/v8/src/torque/types.h
+++ b/chromium/v8/src/torque/types.h
@@ -228,7 +228,6 @@ struct Field {
bool is_weak;
bool const_qualified;
- bool generate_verify;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
};
@@ -670,12 +669,12 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return !IsExtern() ||
- ((flags_ & ClassFlag::kGeneratePrint) && !HasUndefinedLayout());
+ return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
+ !IsAbstract() && !HasUndefinedLayout());
}
bool ShouldGenerateVerify() const {
- return !IsExtern() || ((flags_ & ClassFlag::kGenerateVerify) &&
- (!HasUndefinedLayout() && !IsShape()));
+ return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
+ !HasUndefinedLayout() && !IsShape());
}
bool ShouldGenerateBodyDescriptor() const {
return flags_ & ClassFlag::kGenerateBodyDescriptor ||
@@ -689,9 +688,8 @@ class ClassType final : public AggregateType {
bool HasSameInstanceTypeAsParent() const {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
- bool GenerateCppClassDefinitions() const {
- return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern() ||
- ShouldGenerateBodyDescriptor();
+ bool ShouldGenerateCppClassDefinitions() const {
+ return (flags_ & ClassFlag::kGenerateCppClassDefinitions) || !IsExtern();
}
bool ShouldGenerateFullClassDefinition() const {
return !IsExtern() && !(flags_ & ClassFlag::kCustomCppClass);
diff --git a/chromium/v8/src/torque/utils.h b/chromium/v8/src/torque/utils.h
index 327e1946c52..89633c9d3eb 100644
--- a/chromium/v8/src/torque/utils.h
+++ b/chromium/v8/src/torque/utils.h
@@ -176,8 +176,8 @@ void PrintCommaSeparatedList(std::ostream& os, const T& list) {
struct BottomOffset {
size_t offset;
- BottomOffset& operator=(std::size_t offset) {
- this->offset = offset;
+ BottomOffset& operator=(std::size_t other_offset) {
+ this->offset = other_offset;
return *this;
}
BottomOffset& operator++() {
diff --git a/chromium/v8/src/trap-handler/handler-inside-posix.cc b/chromium/v8/src/trap-handler/handler-inside-posix.cc
index 173e0ba3cda..86e2fb8b8ea 100644
--- a/chromium/v8/src/trap-handler/handler-inside-posix.cc
+++ b/chromium/v8/src/trap-handler/handler-inside-posix.cc
@@ -88,7 +88,7 @@ class UnmaskOobSignalScope {
#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
// This is the address where we continue on a failed "ProbeMemory". It's defined
-// in "handler-outside-simulators.cc".
+// in "handler-outside-simulator.cc".
extern "C" char v8_probe_memory_continuation[];
#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
diff --git a/chromium/v8/src/trap-handler/handler-inside-win.cc b/chromium/v8/src/trap-handler/handler-inside-win.cc
index e5ce133a6ba..fcccc78ee52 100644
--- a/chromium/v8/src/trap-handler/handler-inside-win.cc
+++ b/chromium/v8/src/trap-handler/handler-inside-win.cc
@@ -30,6 +30,10 @@
#include "src/trap-handler/trap-handler-internal.h"
#include "src/trap-handler/trap-handler.h"
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+#include "src/trap-handler/trap-handler-simulator.h"
+#endif
+
namespace v8 {
namespace internal {
namespace trap_handler {
@@ -49,6 +53,12 @@ struct TEB {
PVOID thread_local_storage_pointer;
};
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+// This is the address where we continue on a failed "ProbeMemory". It's defined
+// in "handler-outside-simulator.cc".
+extern "C" char v8_probe_memory_continuation[];
+#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
+
bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
// VectoredExceptionHandlers need extreme caution. Do as little as possible
// to determine if the exception should be handled or not. Exceptions can be
@@ -71,17 +81,16 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
// need to run to initialize values may not have run yet, but that is not
// the case for any thread_locals used here).
TEB* pteb = reinterpret_cast<TEB*>(NtCurrentTeb());
- if (!pteb->thread_local_storage_pointer) {
- return false;
- }
+ if (!pteb->thread_local_storage_pointer) return false;
// Now safe to run more advanced logic, which may access thread_locals
// Ensure the faulting thread was actually running Wasm code.
- if (!IsThreadInWasm()) {
- return false;
- }
+ if (!IsThreadInWasm()) return false;
// Clear g_thread_in_wasm_code, primarily to protect against nested faults.
+ // The only path that resets the flag to true is if we find a landing pad (in
+ // which case this function returns true). Otherwise we leave the flag unset
+ // since we do not return to wasm code.
g_thread_in_wasm_code = false;
const EXCEPTION_RECORD* record = exception->ExceptionRecord;
@@ -89,17 +98,28 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
uintptr_t fault_addr = reinterpret_cast<uintptr_t>(record->ExceptionAddress);
uintptr_t landing_pad = 0;
- if (TryFindLandingPad(fault_addr, &landing_pad)) {
- exception->ContextRecord->Rip = landing_pad;
- // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
- g_thread_in_wasm_code = true;
- return true;
- }
-
- // If we get here, it's not a recoverable wasm fault, so we go to the next
- // handler. Leave the g_thread_in_wasm_code flag unset since we do not return
- // to wasm code.
- return false;
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+ // Only handle signals triggered by the load in {ProbeMemory}.
+ if (fault_addr != reinterpret_cast<uintptr_t>(&ProbeMemory)) return false;
+
+ // The simulated ip will be in the second parameter register (%rdx).
+ uintptr_t simulated_ip = exception->ContextRecord->Rdx;
+ if (!TryFindLandingPad(simulated_ip, &landing_pad)) return false;
+ TH_DCHECK(landing_pad != 0);
+
+ exception->ContextRecord->Rax = landing_pad;
+ // Continue at the memory probing continuation.
+ exception->ContextRecord->Rip =
+ reinterpret_cast<uintptr_t>(&v8_probe_memory_continuation);
+#else
+ if (!TryFindLandingPad(fault_addr, &landing_pad)) return false;
+
+ // Tell the caller to return to the landing pad.
+ exception->ContextRecord->Rip = landing_pad;
+#endif
+ // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
+ g_thread_in_wasm_code = true;
+ return true;
}
LONG HandleWasmTrap(EXCEPTION_POINTERS* exception) {
diff --git a/chromium/v8/src/trap-handler/handler-outside-simulator.cc b/chromium/v8/src/trap-handler/handler-outside-simulator.cc
index cc1e20ee21c..d59debe6252 100644
--- a/chromium/v8/src/trap-handler/handler-outside-simulator.cc
+++ b/chromium/v8/src/trap-handler/handler-outside-simulator.cc
@@ -15,10 +15,14 @@
asm(
".globl " SYMBOL(ProbeMemory) " \n"
SYMBOL(ProbeMemory) ": \n"
- // First parameter (address) passed in %rdi.
- // The second parameter (pc) is unused here. It is read by the trap handler
- // instead.
+// First parameter (address) passed in %rdi on Linux/Mac, and %rcx on Windows.
+// The second parameter (pc) is unused here. It is read by the trap handler
+// instead.
+#if V8_OS_WIN
+ " movb (%rcx), %al \n"
+#else
" movb (%rdi), %al \n"
+#endif // V8_OS_WIN
// Return 0 on success.
" xorl %eax, %eax \n"
// Place an additional "ret" here instead of falling through to the one
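For context on the %rcx/%rdi split above, a small reference sketch (plain C++, not part of the patch) of the integer-argument register order used by the two x64 ABIs involved:

// First integer/pointer argument registers, in order, for each x64 ABI.
constexpr const char* kWin64IntArgRegs[] = {"rcx", "rdx", "r8", "r9"};
constexpr const char* kSysVIntArgRegs[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};

On both ABIs the integer return value lives in rax, which is why the probe loads the byte into %al and then clears %eax to signal success.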
diff --git a/chromium/v8/src/trap-handler/trap-handler.h b/chromium/v8/src/trap-handler/trap-handler.h
index 0b3a6e0a705..79ddf566534 100644
--- a/chromium/v8/src/trap-handler/trap-handler.h
+++ b/chromium/v8/src/trap-handler/trap-handler.h
@@ -25,8 +25,9 @@ namespace trap_handler {
// Arm64 (non-simulator) on Mac.
#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_ARM64 && V8_OS_MACOSX
#define V8_TRAP_HANDLER_SUPPORTED true
-// Arm64 simulator on x64 on Linux or Mac.
-#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && (V8_OS_LINUX || V8_OS_MACOSX)
+// Arm64 simulator on x64 on Linux, Mac, or Windows.
+#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && \
+ (V8_OS_LINUX || V8_OS_MACOSX || V8_OS_WIN)
#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
// Everything else is unsupported.
diff --git a/chromium/v8/src/utils/address-map.h b/chromium/v8/src/utils/address-map.h
index 6a9c513bc6c..0a6c749b391 100644
--- a/chromium/v8/src/utils/address-map.h
+++ b/chromium/v8/src/utils/address-map.h
@@ -5,7 +5,6 @@
#ifndef V8_UTILS_ADDRESS_MAP_H_
#define V8_UTILS_ADDRESS_MAP_H_
-#include "include/v8.h"
#include "src/base/hashmap.h"
#include "src/common/assert-scope.h"
#include "src/objects/heap-object.h"
diff --git a/chromium/v8/src/utils/allocation.cc b/chromium/v8/src/utils/allocation.cc
index 9cdd53fa6d6..5eb6a41f31e 100644
--- a/chromium/v8/src/utils/allocation.cc
+++ b/chromium/v8/src/utils/allocation.cc
@@ -17,6 +17,7 @@
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#if V8_LIBC_BIONIC
@@ -53,6 +54,7 @@ class PageAllocatorInitializer {
page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
+ static_assert(!V8_VIRTUAL_MEMORY_CAGE_BOOL, "Not currently supported");
static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
page_allocator_);
page_allocator_ = lsan_allocator.get();
@@ -70,7 +72,7 @@ class PageAllocatorInitializer {
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
- GetPageTableInitializer)
+ GetPageAllocatorInitializer)
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
@@ -79,14 +81,27 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
- DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
- return GetPageTableInitializer()->page_allocator();
+ DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
+ return GetPageAllocatorInitializer()->page_allocator();
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
+ // TODO(chromium:1218005) remove this code once the cage is no longer
+ // optional.
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ return GetPlatformPageAllocator();
+ } else {
+ CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
+ return GetProcessWideVirtualMemoryCage()->page_allocator();
+ }
+}
+#endif
+
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
- GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
+ GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
return old_page_allocator;
}
@@ -117,10 +132,10 @@ char* StrNDup(const char* str, size_t n) {
return result;
}
-void* AllocWithRetry(size_t size) {
+void* AllocWithRetry(size_t size, MallocFn malloc_fn) {
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
- result = base::Malloc(size);
+ result = malloc_fn(size);
if (result != nullptr) break;
if (!OnCriticalMemoryPressure(size)) break;
}
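The hunk above threads a MallocFn through AllocWithRetry so the same retry loop can drive an arbitrary allocation function. A minimal standalone sketch of that pattern, using illustrative names rather than V8's helpers:

#include <cstddef>
#include <cstdlib>

using MallocFn = void* (*)(size_t);

// Stand-in for OnCriticalMemoryPressure; a real embedder would try to
// release memory here and return true if it made progress.
static bool TryToReleaseMemory(size_t) { return false; }

void* AllocWithRetrySketch(size_t size, MallocFn malloc_fn = std::malloc) {
  void* result = nullptr;
  for (int i = 0; i < 2; ++i) {            // mirrors kAllocationTries == 2
    result = malloc_fn(size);              // pluggable allocation function
    if (result != nullptr) break;          // success
    if (!TryToReleaseMemory(size)) break;  // no progress possible, give up
  }
  return result;                           // nullptr on failure
}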
@@ -323,7 +338,8 @@ inline Address VirtualMemoryCageStart(
}
} // namespace
-bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
+bool VirtualMemoryCage::InitReservation(
+ const ReservationParams& params, base::AddressRegion existing_reservation) {
DCHECK(!reservation_.IsReserved());
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
@@ -337,7 +353,15 @@ bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
- if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
+ if (!existing_reservation.is_empty()) {
+ CHECK_EQ(existing_reservation.size(), params.reservation_size);
+ CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+ IsAligned(existing_reservation.begin(), params.base_alignment));
+ reservation_ =
+ VirtualMemory(params.page_allocator, existing_reservation.begin(),
+ existing_reservation.size());
+ base_ = reservation_.address() + params.base_bias_size;
+ } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
VirtualMemory reservation(params.page_allocator, params.reservation_size,
@@ -418,7 +442,8 @@ bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
params.page_size);
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
params.page_allocator, allocatable_base, allocatable_size,
- params.page_size);
+ params.page_size,
+ base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
return true;
}
diff --git a/chromium/v8/src/utils/allocation.h b/chromium/v8/src/utils/allocation.h
index 1d161b7e246..7127b8efe86 100644
--- a/chromium/v8/src/utils/allocation.h
+++ b/chromium/v8/src/utils/allocation.h
@@ -90,9 +90,11 @@ class FreeStoreAllocationPolicy {
}
};
+using MallocFn = void* (*)(size_t);
+
// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
// Call free to release memory allocated with this function.
-void* AllocWithRetry(size_t size);
+void* AllocWithRetry(size_t size, MallocFn = base::Malloc);
V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
@@ -100,6 +102,24 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platform page allocator instance. Guaranteed to be a valid pointer.

V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// Returns the virtual memory cage page allocator instance for allocating pages
+// inside the virtual memory cage. Guaranteed to be a valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetVirtualMemoryCagePageAllocator();
+#endif
+
+// Returns the appropriate page allocator to use for ArrayBuffer backing stores.
+// If the virtual memory cage is enabled, these must be allocated inside the
+// cage and so this will be the CagePageAllocator. Otherwise it will be the
+// PlatformPageAllocator.
+inline v8::PageAllocator* GetArrayBufferPageAllocator() {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ return GetVirtualMemoryCagePageAllocator();
+#else
+ return GetPlatformPageAllocator();
+#endif
+}
+
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
@@ -310,6 +330,9 @@ class VirtualMemory final {
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
+//
+// TODO(chromium:1218005) can we either combine this class and
+// v8::VirtualMemoryCage in v8-platform.h or rename one of the two?
class VirtualMemoryCage {
public:
VirtualMemoryCage();
@@ -351,7 +374,12 @@ class VirtualMemoryCage {
// A number of attempts is made to try to reserve a region that satisfies the
// constraints in params, but this may fail. The base address may be different
// than the one requested.
- bool InitReservation(const ReservationParams& params);
+ // If an existing reservation is provided, it will be used for this cage
+ // instead. The caller retains ownership of the reservation and is responsible
+ // for keeping the memory reserved during the lifetime of this object.
+ bool InitReservation(
+ const ReservationParams& params,
+ base::AddressRegion existing_reservation = base::AddressRegion());
void Free();
diff --git a/chromium/v8/src/utils/utils.h b/chromium/v8/src/utils/utils.h
index 5238062e055..005e1c4ad4e 100644
--- a/chromium/v8/src/utils/utils.h
+++ b/chromium/v8/src/utils/utils.h
@@ -217,7 +217,8 @@ inline T RoundingAverageUnsigned(T a, T b) {
//
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
//
-#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
+#define DEFINE_ONE_FIELD_OFFSET(Name, Size, ...) \
+ Name, Name##End = Name + (Size)-1,
#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
enum { \
diff --git a/chromium/v8/src/utils/v8dll-main.cc b/chromium/v8/src/utils/v8dll-main.cc
index 6b484cfc8e2..9bdd97f365a 100644
--- a/chromium/v8/src/utils/v8dll-main.cc
+++ b/chromium/v8/src/utils/v8dll-main.cc
@@ -5,7 +5,7 @@
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
#undef USING_V8_SHARED
-#include "include/v8.h"
+#include "include/v8config.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 6e2bacc0439..211cf82398a 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4262,14 +4262,34 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ if (kind == kF32) {
+ FloatRegister src_f = liftoff::GetFloatRegister(src);
+ VFPCompareAndSetFlags(src_f, src_f);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ VFPCompareAndSetFlags(src, src);
+ }
+
+ // Store a non-zero value if src is NaN.
+ str(dst, MemOperand(dst), ne); // x != x iff isnan(x)
}
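The ARM path above leans on the self-comparison identity for NaN detection: after comparing a value with itself, the ne condition holds only for NaN operands. A tiny standalone illustration of that identity:

#include <cassert>
#include <limits>

// Only NaN compares unequal to itself; this is the "x != x iff isnan(x)"
// identity used by emit_set_if_nan above.
bool IsNaNBySelfCompare(double x) { return x != x; }

int main() {
  assert(IsNaNBySelfCompare(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsNaNBySelfCompare(0.0));
  assert(!IsNaNBySelfCompare(std::numeric_limits<double>::infinity()));
}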
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ QwNeonRegister src_q = liftoff::GetSimd128Register(src);
+ QwNeonRegister tmp_q = liftoff::GetSimd128Register(tmp_s128);
+ if (lane_kind == kF32) {
+ vpadd(tmp_q.low(), src_q.low(), src_q.high());
+ LowDwVfpRegister tmp_d =
+ LowDwVfpRegister::from_code(tmp_s128.low_fp().code());
+ vadd(tmp_d.low(), tmp_d.low(), tmp_d.high());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vadd(tmp_q.low(), src_q.low(), src_q.high());
+ }
+ emit_set_if_nan(dst, tmp_q.low(), lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a52370f2935..e10a18a5607 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -452,6 +452,13 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldMemOperand(instance, offset), tag,
+ isolate_root);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
@@ -1173,12 +1180,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.S(), src.W());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.W(), scratch.S());
+ PopcntHelper(dst.W(), src.W());
return true;
}
@@ -1193,12 +1195,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.D(), src.gp().X());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.gp().X(), scratch.D());
+ PopcntHelper(dst.gp().X(), src.gp().X());
return true;
}
@@ -1717,13 +1714,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
UseScratchRegisterScope temps(this);
MemOperand src_op{
liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (dst != src) {
Mov(dst.fp().Q(), src.fp().Q());
}
+ *protected_load_pc = pc_offset();
if (mem_type == MachineType::Int8()) {
ld1(dst.fp().B(), laneidx, src_op);
} else if (mem_type == MachineType::Int16()) {
@@ -3259,14 +3256,35 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (kind == kF32) {
+ Fcmp(src.S(), src.S());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // If it's a NaN, it must be non-zero, so store that as the set value.
+ Str(src.S(), MemOperand(dst));
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Fcmp(src.D(), src.D());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // Double-precision NaNs must be non-zero in the most-significant 32
+ // bits, so store that.
+ St1(src.V4S(), 1, MemOperand(dst));
+ }
+ Bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ DoubleRegister tmp_fp = tmp_s128.fp();
+ if (lane_kind == kF32) {
+ Fmaxv(tmp_fp.S(), src.fp().V4S());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ Fmaxp(tmp_fp.D(), src.fp().V2D());
+ }
+ emit_set_if_nan(dst, tmp_fp, lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index bb2fed83c65..2d922d3b2e5 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2718,40 +2718,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-enum class ShiftSignedness { kSigned, kUnsigned };
-
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm is used for both signed and unsigned shifts, the only
- // difference is the actual shift and pack in the end. This is the same
- // algorithm as used in code-generator-ia32.cc
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- XMMRegister tmp_simd =
- assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
-
- // Unpack the bytes into words, do logical shifts, and repack.
- assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- assm->mov(tmp, rhs.gp());
- // Take shift value modulo 8.
- assm->and_(tmp, 7);
- assm->add(tmp, Immediate(8));
- assm->Movd(tmp_simd, tmp);
- if (is_signed) {
- assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
Register tmp =
@@ -2762,7 +2728,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->cmov(zero, dst.gp(), tmp);
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2809,23 +2775,19 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
- movss(dst.fp(), src_op);
+ Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
- movsd(dst.fp(), src_op);
+ Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- Vbroadcastss(dst.fp(), src_op);
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2875,12 +2837,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
S128Store32Lane(dst_op, src.fp(), lane);
} else {
DCHECK_EQ(MachineRepresentation::kWord64, rep);
- if (lane == 0) {
- Movlps(dst_op, src.fp());
- } else {
- DCHECK_EQ(1, lane);
- Movhps(dst_op, src.fp());
- }
+ S128Store64Lane(dst_op, src.fp(), lane);
}
}
@@ -2951,16 +2908,12 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Pshufd(dst.fp(), dst.fp(), uint8_t{0});
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -3180,17 +3133,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+ // 1. AVX or SSE4_2: no requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
LiftoffRegister tmp = GetUnusedRegister(
RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
@@ -3366,89 +3313,48 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- mov(tmp.gp(), rhs.gp());
- and_(tmp.gp(), Immediate(7));
- add(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
- Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- sub(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
+ tmp_simd.fp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp.gp(), mask);
- Movd(liftoff::kScratchDoubleReg, tmp.gp());
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, tmp.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(liftoff::kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
Register tmp = GetUnusedRegister(kGpReg, {}).gp();
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7;
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 3>(
- this, dst, lhs, rhs);
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp, mask);
- Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, tmp, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3951,19 +3857,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
@@ -4021,28 +3915,14 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{1});
- Andps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Absps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -4104,61 +3984,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- minps(liftoff::kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- minps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- maxps(liftoff::kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- maxps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4177,28 +4008,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Abspd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{63});
- Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), dst.fp(), byte{63});
- Xorpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negpd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -4300,26 +4117,8 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
- } else {
- movaps(liftoff::kScratchDoubleReg, src.fp());
- cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(liftoff::kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(liftoff::kScratchDoubleReg, dst.fp());
- Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4SConvertF32x4(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4787,22 +4586,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4836,19 +4627,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
or_(Operand(dst, 0), tmp_gp);
}
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
index d445655dcac..5b43a2a41d1 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -46,6 +46,18 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+#elif V8_TARGET_ARCH_LOONG64
+
+// t6-t8 and s3-s4: scratch registers, s6: root
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
+ s1, s2, s5, s7, s8);
+
+// f29: zero, f30-f31: macro-assembler scratch float Registers.
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
+ f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -95,8 +107,8 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedFpRegs in frame-constants-riscv64.h
constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
- fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+ DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 19611fb0eef..cea6c9361d2 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -32,7 +32,9 @@ namespace wasm {
enum LiftoffCondition {
kEqual,
+ kEqualZero = kEqual, // When used in a unary operation.
kUnequal,
+ kNotEqualZero = kUnequal, // When used in a unary operation.
kSignedLessThan,
kSignedLessEqual,
kSignedGreaterThan,
@@ -43,8 +45,8 @@ enum LiftoffCondition {
kUnsignedGreaterEqual
};
-inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
+inline constexpr LiftoffCondition Negate(LiftoffCondition cond) {
+ switch (cond) {
case kEqual:
return kUnequal;
case kUnequal:
@@ -68,6 +70,31 @@ inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
}
}
+inline constexpr LiftoffCondition Flip(LiftoffCondition cond) {
+ switch (cond) {
+ case kEqual:
+ return kEqual;
+ case kUnequal:
+ return kUnequal;
+ case kSignedLessThan:
+ return kSignedGreaterThan;
+ case kSignedLessEqual:
+ return kSignedGreaterEqual;
+ case kSignedGreaterEqual:
+ return kSignedLessEqual;
+ case kSignedGreaterThan:
+ return kSignedLessThan;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterThan;
+ case kUnsignedLessEqual:
+ return kUnsignedGreaterEqual;
+ case kUnsignedGreaterEqual:
+ return kUnsignedLessEqual;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessThan;
+ }
+}
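+// Note the difference to {Negate} above (illustration only): {Negate} returns
+// the logically inverted condition, used when branching on the opposite
+// outcome, while {Flip} returns the condition that holds after swapping the
+// two operands. For example, Negate(kSignedLessThan) is kSignedGreaterEqual,
+// whereas Flip(kSignedLessThan) is kSignedGreaterThan ("lhs < rhs" is the
+// same as "rhs > lhs").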
+
class LiftoffAssembler : public TurboAssembler {
public:
// Each slot in our stack frame currently has exactly 8 bytes.
@@ -668,6 +695,9 @@ class LiftoffAssembler : public TurboAssembler {
int size);
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
+ inline void LoadExternalPointer(Register dst, Register instance, int offset,
+ ExternalPointerTag tag,
+ Register isolate_root);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
@@ -975,8 +1005,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
- inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
- Register lhs, int imm);
+ inline void emit_i32_cond_jumpi(LiftoffCondition, Label*, Register lhs,
+ int imm);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
@@ -1456,12 +1486,12 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
- // Set the i32 at address dst to 1 if src is a NaN.
+ // Set the i32 at address dst to a non-zero value if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
- inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
- Register tmp_gp, DoubleRegister tmp_fp,
+ inline void emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp, LiftoffRegister tmp_s128,
ValueKind lane_kind);
////////////////////////////////////
@@ -1506,6 +1536,10 @@ class LiftoffAssembler : public TurboAssembler {
private:
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
+ // Spill one or two fp registers to get a pair of adjacent fp registers.
+ LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
+
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalKinds = 16;
union {
@@ -1521,10 +1555,6 @@ class LiftoffAssembler : public TurboAssembler {
int ool_spill_space_size_ = 0;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
-
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
- // Spill one or two fp registers to get a pair of adjacent fp registers.
- LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
@@ -1711,6 +1741,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf83..fc5684f4273 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,31 +306,18 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
- constexpr WasmFeatures kStagedFeatures{
- FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE
// Bailout is allowed if any experimental feature is enabled.
if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
- // Staged features should be feature complete in Liftoff according to
- // https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
- // listed here explicitly, with a bug assigned to each of them.
-
- // TODO(7581): Fully implement reftypes in Liftoff.
- STATIC_ASSERT(kStagedFeatures.has_reftypes());
- if (reason == kRefTypes) {
- DCHECK(env->enabled_features.has_reftypes());
- return;
- }
-
// Otherwise, bailout is not allowed.
FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
@@ -373,6 +360,29 @@ class LiftoffCompiler {
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
+ class MostlySmallValueKindSig : public Signature<ValueKind> {
+ public:
+ MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig)
+ : Signature<ValueKind>(sig->return_count(), sig->parameter_count(),
+ MakeKinds(inline_storage_, zone, sig)) {}
+
+ private:
+ static constexpr size_t kInlineStorage = 8;
+
+ static ValueKind* MakeKinds(ValueKind* storage, Zone* zone,
+ const FunctionSig* sig) {
+ const size_t size = sig->parameter_count() + sig->return_count();
+ if (V8_UNLIKELY(size > kInlineStorage)) {
+ storage = zone->NewArray<ValueKind>(size);
+ }
+ std::transform(sig->all().begin(), sig->all().end(), storage,
+ [](ValueType type) { return type.kind(); });
+ return storage;
+ }
+
+ ValueKind inline_storage_[kInlineStorage];
+ };
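+ // Note (illustration only): compared to the zone-allocated {MakeKindSig}
+ // that this replaces further below, signatures with at most 8 parameters
+ // plus returns keep their kinds in the inline storage above, so the common
+ // case needs no zone allocation; only larger signatures fall back to
+ // {zone->NewArray}.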
+
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
struct SpilledRegistersForInspection : public ZoneObject {
@@ -800,7 +810,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
- if (FLAG_wasm_dynamic_tiering) {
+ if (env_->dynamic_tiering == DynamicTiering::kEnabled) {
// TODO(arobin): Avoid spilling registers unconditionally.
__ SpillAllRegisters();
CODE_COMMENT("dynamic tiering");
@@ -832,8 +842,8 @@ class LiftoffCompiler {
// Check if the number of calls is a power of 2.
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
- // Unary "unequal" means "different from zero".
- __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
+ __ emit_cond_jump(kNotEqualZero, &no_tierup, kI32,
+ old_number_of_calls.gp());
TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access
@@ -1009,13 +1019,11 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
{});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
+ __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kI32, flag);
+ __ emit_cond_jump(kEqualZero, &no_break, kI32, flag);
__ bind(&do_break);
EmitBreakpoint(decoder);
@@ -1254,6 +1262,46 @@ class LiftoffCompiler {
}
}
+ void JumpIfFalse(FullDecoder* decoder, Label* false_dst) {
+ LiftoffCondition cond =
+ test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero;
+
+ if (!has_outstanding_op()) {
+ // Unary comparison.
+ Register value = __ PopToRegister().gp();
+ __ emit_cond_jump(cond, false_dst, kI32, value);
+ return;
+ }
+
+ // Binary comparison of i32 values.
+ cond = Negate(GetCompareCondition(outstanding_op_));
+ outstanding_op_ = kNoOutstandingOp;
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ if (rhs_slot.is_const()) {
+ // Compare to a constant.
+ int32_t rhs_imm = rhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ Register lhs = __ PopToRegister().gp();
+ __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm);
+ return;
+ }
+
+ Register rhs = __ PopToRegister().gp();
+ LiftoffAssembler::VarState lhs_slot = __ cache_state()->stack_state.back();
+ if (lhs_slot.is_const()) {
+ // Compare a constant to an arbitrary value.
+ int32_t lhs_imm = lhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ // Flip the condition, because {lhs} and {rhs} are swapped.
+ __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm);
+ return;
+ }
+
+ // Compare two arbitrary values.
+ Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
+ __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs);
+ }
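+ // Example (illustration only): for "(br_if $l (i32.lt_s (local.get 0)
+ // (i32.const 5)))", the i32.lt_s leaves {outstanding_op_} set, so instead
+ // of materializing a boolean and testing it against zero, the code above
+ // pops the constant 5 and emits a single compare-and-branch on the negated
+ // condition, i.e. "jump to {false_dst} if lhs >= 5".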
+
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -1261,25 +1309,8 @@ class LiftoffCompiler {
// Allocate the else state.
if_block->else_state = std::make_unique<ElseState>();
- // Test the condition, jump to else if zero.
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, if_block->else_state->label.get(), kI32,
- value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, if_block->else_state->label.get(), kI32, lhs,
- rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to else if zero.
+ JumpIfFalse(decoder, if_block->else_state->label.get());
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
@@ -2313,7 +2344,7 @@ class LiftoffCompiler {
__ PushRegister(kind, value);
}
- void GlobalSet(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value&,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
@@ -2493,23 +2524,9 @@ class LiftoffCompiler {
}
Label cont_false;
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &cont_false, kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to {cont_false} if zero.
+ JumpIfFalse(decoder, &cont_false);
BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
@@ -2693,8 +2710,7 @@ class LiftoffCompiler {
__ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -2757,14 +2773,17 @@ class LiftoffCompiler {
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned;
+ if (index != no_reg) pinned.set(index);
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
DCHECK_GE(kMaxUInt32, offset);
__ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
- __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ if (index != no_reg) {
+ // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ }
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -2808,30 +2827,6 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uintptr_t* offset,
- LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations ||
- env_->bounds_checks == kTrapHandler) {
- return index;
- }
- CODE_COMMENT("mask memory index");
- // Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register old_index = index;
- pinned->clear(LiftoffRegister{old_index});
- index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) {
- __ Move(index, old_index, kPointerKind);
- }
- }
- Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
- if (*offset) __ emit_ptrsize_addi(index, index, *offset);
- __ emit_ptrsize_and(index, index, tmp);
- *offset = 0;
- return index;
- }
-
bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
int access_size, uintptr_t* offset) {
if (!index_slot.is_const()) return false;
@@ -2892,7 +2887,6 @@ class LiftoffCompiler {
CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -2937,7 +2931,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2970,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load lane");
Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +3015,6 @@ class LiftoffCompiler {
if (index == no_reg) return;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
@@ -3058,7 +3049,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
@@ -4186,8 +4176,9 @@ class LiftoffCompiler {
Load64BitExceptionValue(value, values_array, index, pinned);
break;
case kF64: {
- RegClass rc = reg_class_for(kI64);
- LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(rc, pinned));
+ RegClass rc_i64 = reg_class_for(kI64);
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(rc_i64, pinned));
Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
__ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
nullptr);
@@ -4340,7 +4331,6 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
@@ -4363,7 +4353,6 @@ class LiftoffCompiler {
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
@@ -4411,7 +4400,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4422,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
@@ -4467,7 +4454,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4500,6 @@ class LiftoffCompiler {
pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4531,8 +4516,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index. It may have changed
- // above in {AddMemoryMasking}.
+ // We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4546,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4914,7 +4897,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back(2);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- __ SmiUntag(kReturnRegister0);
+ __ SmiToInt32(kReturnRegister0);
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
@@ -5055,7 +5038,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
- static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5167,8 @@ class LiftoffCompiler {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
+ // TODO(7748): Unify implementation with TF: Implement this with
+ // GenerateCCall. Remove runtime function and builtin in wasm.tq.
CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
// Builtin parameter order:
@@ -5200,7 +5185,50 @@ class LiftoffCompiler {
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ ValueKind rtt_kind = rtt.type.kind();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ // Allocate the array.
+ {
+ LiftoffAssembler::VarState rtt_var =
+ __ cache_state()->stack_state.end()[-1];
+
+ LiftoffRegList pinned;
+
+ LiftoffRegister elem_size_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(elem_size_reg, WasmValue(element_size_bytes(elem_kind)));
+ LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
+
+ LiftoffRegister length_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(length_reg,
+ WasmValue(static_cast<int32_t>(elements.size())));
+ LiftoffAssembler::VarState length_var(kI32, length_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmAllocateArray_Uninitialized,
+ MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
+ {rtt_var, length_var, elem_size_var},
+ decoder->position());
+ // Drop the RTT.
+ __ DropValues(1);
+ }
+
+ // Initialize the array with stack arguments.
+ LiftoffRegister array(kReturnRegister0);
+ if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
+ for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(array);
+ LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister offset_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(offset_reg, WasmValue(i << element_size_log2(elem_kind)));
+ StoreObjectField(array.gp(), offset_reg.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
+ element, pinned, elem_kind);
+ }
+
+ // Push the array onto the stack.
+ __ PushRegister(kRef, array);
}
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -5648,20 +5676,11 @@ class LiftoffCompiler {
}
private:
- ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
- ValueKind* reps =
- zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
- ValueKind* ptr = reps;
- for (ValueType type : sig->all()) *ptr++ = type.kind();
- return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
- reps);
- }
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5691,7 +5710,7 @@ class LiftoffCompiler {
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5701,12 +5720,12 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ __ CallIndirect(&sig, call_descriptor, target);
+ FinishCall(decoder, &sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
- __ PrepareCall(sig, call_descriptor);
+ __ PrepareCall(&sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (tail_call) {
@@ -5720,7 +5739,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
}
@@ -5728,8 +5747,8 @@ class LiftoffCompiler {
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5778,28 +5797,6 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
- // Mask the index to prevent SSCA.
- if (FLAG_untrusted_code_mitigations) {
- CODE_COMMENT("Mask indirect call index");
- // mask = ((index - size) & ~index) >> 31
- // Reuse allocated registers; note: size is still stored in {tmp_const}.
- Register diff = table;
- Register neg_index = tmp_const;
- Register mask = scratch;
- // 1) diff = index - size
- __ emit_i32_sub(diff, index, tmp_const);
- // 2) neg_index = ~index
- __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index, neg_index, index);
- // 3) mask = diff & neg_index
- __ emit_i32_and(mask, diff, neg_index);
- // 4) mask = mask >> 31
- __ emit_i32_sari(mask, mask, 31);
-
- // Apply mask.
- __ emit_i32_and(index, index, mask);
- }
-
CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
if (imm.table_imm.index == 0) {
@@ -5875,7 +5872,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5885,16 +5882,16 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
+ __ CallIndirect(&sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* type_sig, TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, type_sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
compiler::CallDescriptor* call_descriptor =
@@ -5965,11 +5962,9 @@ class LiftoffCompiler {
#ifdef V8_HEAP_SANDBOX
LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
- __ LoadExternalPointerField(
- target.gp(),
- FieldOperand(func_data.gp(), WasmFunctionData::kForeignAddressOffset),
- kForeignForeignAddressTag, temp.gp(),
- TurboAssembler::IsolateRootLocation::kInScratchRegister);
+ __ LoadExternalPointer(target.gp(), func_data.gp(),
+ WasmFunctionData::kForeignAddressOffset,
+ kForeignForeignAddressTag, temp.gp());
#else
__ Load(
target, func_data.gp(), no_reg,
@@ -5999,7 +5994,7 @@ class LiftoffCompiler {
// is in {instance}.
Register target_reg = target.gp();
Register instance_reg = instance.gp();
- __ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
+ __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -6009,9 +6004,9 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target_reg);
+ __ CallIndirect(&sig, call_descriptor, target_reg);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
@@ -6151,14 +6146,61 @@ class LiftoffCompiler {
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
- __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
- tmp_fp.fp(), lane_kind);
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+ tmp_s128, lane_kind);
+ }
+
+ bool has_outstanding_op() const {
+ return outstanding_op_ != kNoOutstandingOp;
+ }
+
+ bool test_and_reset_outstanding_op(WasmOpcode opcode) {
+ DCHECK_NE(kNoOutstandingOp, opcode);
+ if (outstanding_op_ != opcode) return false;
+ outstanding_op_ = kNoOutstandingOp;
+ return true;
+ }
+
+ void TraceCacheState(FullDecoder* decoder) const {
+ if (!FLAG_trace_liftoff) return;
+ StdoutStream os;
+ for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
+ --control_depth) {
+ auto* cache_state =
+ control_depth == -1 ? __ cache_state()
+ : &decoder->control_at(control_depth)
+ ->label_state;
+ os << PrintCollection(cache_state->stack_state);
+ if (control_depth != -1) PrintF("; ");
+ }
+ os << "\n";
+ }
+
+ void DefineSafepoint() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepoint(safepoint);
+ }
+
+ void DefineSafepointWithCalleeSavedRegisters() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
+ }
+
+ Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList::ForRegs(fallback));
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ }
+ return instance;
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
@@ -6223,46 +6265,6 @@ class LiftoffCompiler {
int32_t* max_steps_;
int32_t* nondeterminism_;
- bool has_outstanding_op() const {
- return outstanding_op_ != kNoOutstandingOp;
- }
-
- void TraceCacheState(FullDecoder* decoder) const {
- if (!FLAG_trace_liftoff) return;
- StdoutStream os;
- for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
- --control_depth) {
- auto* cache_state =
- control_depth == -1 ? __ cache_state()
- : &decoder->control_at(control_depth)
- ->label_state;
- os << PrintCollection(cache_state->stack_state);
- if (control_depth != -1) PrintF("; ");
- }
- os << "\n";
- }
-
- void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepoint(safepoint);
- }
-
- void DefineSafepointWithCalleeSavedRegisters() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
- }
-
- Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
- Register instance = __ cache_state()->cached_instance;
- if (instance == no_reg) {
- instance = __ cache_state()->TrySetCachedInstanceRegister(
- pinned | LiftoffRegList::ForRegs(fallback));
- if (instance == no_reg) instance = fallback;
- __ LoadInstanceFromFrame(instance);
- }
- return instance;
- }
-
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h
index 63ac2acf8bc..74eb10ca341 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-register.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-register.h
@@ -313,9 +313,9 @@ class LiftoffRegister {
}
private:
- storage_t code_;
-
explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
+
+ storage_t code_;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);
@@ -467,10 +467,10 @@ class LiftoffRegList {
}
private:
- storage_t regs_ = 0;
-
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
+
+ storage_t regs_ = 0;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegList);
diff --git a/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
new file mode 100644
index 00000000000..f22e0136014
--- /dev/null
+++ b/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -0,0 +1,2817 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
+template <typename T>
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->add_d(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, Operand(offset_imm));
+ assm->add_d(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->add_d(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
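+// Rough summary of the cases above (illustration only):
+//   GetMemOp(assm, addr, no_reg, 8)       -> MemOperand(addr, 8)
+//   GetMemOp(assm, addr, offset, 8)       -> add_d(scratch, addr, offset);
+//                                            MemOperand(scratch, 8)
+//   GetMemOp(assm, addr, offset, 1 << 40) -> li(scratch, 1 << 40);
+//                                            add_d(scratch, scratch, addr);
+//                                            add_d(scratch, scratch, offset);
+//                                            MemOperand(scratch, 0)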
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->Ld_w(dst.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->Ld_d(dst.gp(), src);
+ break;
+ case kF32:
+ assm->Fld_s(dst.fp(), src);
+ break;
+ case kF64:
+ assm->Fld_d(dst.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueKind kind) {
+ MemOperand dst(base, offset);
+ switch (kind) {
+ case kI32:
+ assm->St_w(src.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->St_d(src.gp(), dst);
+ break;
+ case kF32:
+ assm->Fst_s(src.fp(), dst);
+ break;
+ case kF64:
+ assm->Fst_d(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->St_w(reg.gp(), MemOperand(sp, 0));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ assm->Push(reg.gp());
+ break;
+ case kF32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_s(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kF64:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_d(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+ // When the constant that represents the size of the stack frame can't be
+ // represented in 16 bits, we need three instructions to add it to sp, so we
+ // reserve space for this case.
+ addi_d(sp, sp, 0);
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld_d(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld_d(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld_d(scratch, MemOperand(sp, i * 8));
+ St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ addi_d(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+ // We can't run out of space; just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add_d(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset;
+ CHECK(is_int26(imm32));
+ patching_assembler.b(imm32 >> 2);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld_d(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld_d(stack_limit, MemOperand(stack_limit, 0));
+ Add_d(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrement the SP.
+ Add_d(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Add_d(sp, sp, -framesize)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 3 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ CHECK(is_int26(imm32));
+ b(imm32 >> 2);
+}
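+// Rough picture of the patched prologue (illustration only): for small frames
+// the three instruction slots reserved in {PrepareStackFrame} are patched to
+// an "Add_d sp, sp, -frame_size"; for frames of 4KB or more the first slot is
+// patched to a branch into the OOL code emitted above (stack limit check,
+// stack-overflow call, the actual Add_d), which then branches back to
+// offset + 3 * kInstrSize.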
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference(kind);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Ld_b(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Ld_w(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld_d(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int32_t offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld_d(dst, MemOperand(instance, offset));
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ St_d(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::ResetOSRTarget() {}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ UseScratchRegisterScope temps(this);
+ Operand offset_op =
+ offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
+ // register, because that's needed in the MemOperand below.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.Acquire();
+ Add_d(effective_offset, offset_reg, Operand(offset_imm));
+ offset_op = Operand(effective_offset);
+ }
+ if (offset_op.is_reg()) {
+ St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ } else {
+ St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ }
+
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
+}
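+// The write barrier above follows the usual pattern (illustration only): the
+// tagged store always happens; the {CallRecordWriteStubSaveRegisters} call is
+// only reached when the destination page has the "pointers from here are
+// interesting" flag set, the stored value is not a Smi, and the value's page
+// has the "pointers to here are interesting" flag set.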
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem,
+ bool i64_offset) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ld_bu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Ld_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ld_hu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ld_h(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ld_wu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ld_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Ld_d(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Fld_s(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Fld_d(dst.fp(), src_op);
+ break;
+ case LoadType::kS128Load:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ St_b(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::St_h(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::St_w(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::St_d(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Fst_s(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Fst_d(src.fp(), dst_op);
+ break;
+ case StoreType::kS128Store:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { dbar(0); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, kind);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, kind);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ St_w(reg.gp(), dst);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ St_d(reg.gp(), dst);
+ break;
+ case kF32:
+ Fst_s(reg.fp(), dst);
+ break;
+ case kF64:
+ TurboAssembler::Fst_d(reg.fp(), dst);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ St_w(tmp.gp(), dst);
+ break;
+ }
+ case kI64:
+ case kRef:
+ case kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ St_d(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ Ld_w(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ // TODO(LOONG_dev): LOONG64 check: MIPS64 doesn't need this, do ARM64/LOONG64?
+ case kRtt:
+ case kRttWithDepth:
+ Ld_d(reg.gp(), src);
+ break;
+ case kF32:
+ Fld_s(reg.fp(), src);
+ break;
+ case kF64:
+ TurboAssembler::Fld_d(reg.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add_d(a0, fp, Operand(-start - size));
+ Add_d(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ St_d(zero_reg, MemOperand(a0, 0));
+ addi_d(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
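+// Example (illustration only): for size == 20, i.e. two 8-byte slots plus a
+// 4-byte remainder, the straight-line path above emits two St_d stores and
+// one St_w of zero_reg; only fills larger than 12 slots use the a0/a1 loop.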
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz_d(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz_d(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Div_wu(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_wu(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, add_w)
+I32_BINOP(sub, sub_w)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add_w)
+I32_BINOP_I(sub, Sub_w)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz_w(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt_w(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction, instruction1) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction1(dst, src, amount & 0x1f); \
+ }
+
+I32_SHIFTOP_I(shl, sll_w, slli_w)
+I32_SHIFTOP_I(sar, sra_w, srai_w)
+I32_SHIFTOP_I(shr, srl_w, srli_w)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(
+ kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, Add_d)
+I64_BINOP(sub, Sub_d)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction, instructioni) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ instructioni(dst.gp(), src.gp(), amount & 63); \
+ }
+
+I64_SHIFTOP_I(shl, sll_d, slli_d)
+I64_SHIFTOP_I(sar, sra_d, srai_d)
+I64_SHIFTOP_I(shr, srl_d, srli_d)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+ bstrpick_d(dst, src, 31, 0);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI32SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_w_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
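+ // If the truncation saturated to INT32_MAX, dst + 1 wraps around; Slt detects the wrap and Movn then replaces dst with the wrapped value (INT32_MIN).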
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Trap if the conversion did not round-trip exactly.
+ movgr2fr_w(kScratchDoubleReg, dst.gp());
+ ffint_s_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+
+ // Trap if the conversion did not round-trip exactly.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ fcvt_s_d(converted_back.fp(), converted_back.fp());
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_w_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+
+ // Trap if the conversion did not round-trip exactly.
+ ffint_d_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+
+ // Trap if the conversion did not round-trip exactly.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ slli_w(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI64SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_l_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Trap if the conversion did not round-trip exactly.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_s_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Trap if the conversion failed (indicated by a zero in kScratchReg).
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_l_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Trap if the conversion did not round-trip exactly.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_d_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Trap if the conversion failed (indicated by a zero in kScratchReg).
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64ReinterpretF64:
+ movfr2gr_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ movgr2fr_d(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i32");
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueKind kind,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(kind == kI32 || kind == kI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltui(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If negative condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+ // If a temporary register was used, move the result into dst.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltui(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If negative condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+ // If a temporary register was used, move the result into dst.
+ TurboAssembler::Move(dst, tmp);
+}
+
+namespace liftoff {
+
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+ bool* predicate) {
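+ // {predicate} is set to whether the Liftoff condition holds when the returned
+ // FPU condition evaluates to true; false means the FPU result must be negated.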
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kUnequal:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF32(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f32.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF32(lhs, rhs, fcond);
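+ // dst is preset to 1 above; clear it if the FPU result does not match the requested predicate.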
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF64(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f64.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF64(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueKind kind) {
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
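+ // Smis have a cleared tag bit, so scratch is zero exactly for Smis.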
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
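+ // Take the out-of-line path if the stack pointer is at or below the limit just loaded.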
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (FLAG_debug_code) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
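+ // Reserve space for all GP registers at once, then fill the slots from the top down.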
+ unsigned offset = num_gp_regs * kSystemPointerSize;
+ addi_d(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ St_d(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ unsigned slot_size = 8;
+ addi_d(sp, sp, -(num_fp_regs * slot_size));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += slot_size;
+ }
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += 8;
+ }
+ if (fp_offset) addi_d(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
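+ // PushRegisters stored the highest-numbered GP register closest to sp, so restore in reverse order.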
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ addi_d(sp, sp, gp_offset);
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
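+ // Walk all spilled registers; slots holding references are recorded in the safepoint so the GC can visit them.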
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the size of the additional spill space.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ Drop(static_cast<int>(num_stack_slots));
+ Ret();
+}
+
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueKind out_argument_kind, int stack_bytes,
+ ExternalReference ext_ref) {
+ addi_d(sp, sp, -stack_bytes);
+
+ int arg_bytes = 0;
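+ // Store all arguments into the stack buffer just allocated below sp.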
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On LoongArch, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mov(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
+ }
+
+ addi_d(sp, sp, stack_bytes);
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ addi_d(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addi_d(sp, sp, size);
+}
+
+void LiftoffAssembler::MaybeOSR() {}
+
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label not_nan;
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ BranchFalseShortF(&not_nan);
+ li(scratch, 1);
+ St_w(scratch, MemOperand(dst, 0));
+ bind(&not_nan);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp,
+ LiftoffRegister tmp_s128,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->Push(kScratchReg);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ }
+ break;
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->Push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
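
A minimal standalone sketch, with made-up values, of how the stack_decrement computed above reserves padding for skipped parameter slots before each value is pushed:

    #include <cassert>
    int main() {
      constexpr int kSystemPointerSize = 8;  // assumption: 64-bit target
      const int param_slots = 4;
      const int dst_slots[] = {3, 0};  // already sorted into push order
      int last_stack_slot = param_slots;
      int decrements[2];
      for (int i = 0; i < 2; ++i) {
        decrements[i] = (last_stack_slot - dst_slots[i]) * kSystemPointerSize;
        last_stack_slot = dst_slots[i];
      }
      assert(decrements[0] == 1 * kSystemPointerSize);  // slot 3: just the value
      assert(decrements[1] == 3 * kSystemPointerSize);  // slot 0 plus padding for slots 1 and 2
      return 0;
    }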
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4ab036da8e6..35eabecbf04 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -3067,20 +3067,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sw(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 0a23c190e92..e47da841485 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -3235,22 +3235,35 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sd(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+  Sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (lane_kind == kF32) {
+ fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ }
+ BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
+ USE_DELAY_SLOT);
+ li(tmp_gp, 1);
+ Sw(tmp_gp, MemOperand(dst));
+ bind(&not_nan);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 8e3808d2599..938fa41ea9e 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -41,7 +42,8 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset =
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -106,33 +108,105 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = ip;
+ // Push the return address and frame pointer to complete the stack frame.
+ AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp), r0);
+ StoreU64(scratch, MemOperand(sp), r0);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize), r0);
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
+ }
+
+ // Set the new stack and frame pointer.
+ AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
+ Pop(r0, fp);
+ mtlr(r0);
}
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
- int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
-
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int frame_size =
+ GetTotalFrameSize() -
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
+
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.addi(sp, sp, Operand(-frame_size));
return;
}
-#endif
- if (!is_int16(-frame_size)) {
- bailout(kOtherReason, "PPC subi overflow");
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
return;
}
- Assembler patching_assembler(
- AssemblerOptions{},
- ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
- patching_assembler.addi(sp, sp, Operand(-frame_size));
+ patching_assembler.b(jump_offset, LeaveLK);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddS64(stack_limit, stack_limit, Operand(frame_size), r0);
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ SubS64(sp, sp, Operand(frame_size), r0);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ jump_offset = offset - pc_offset() + kInstrSize;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
+ return;
+ }
+ b(jump_offset, LeaveLK);
}
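
A plain-C++ restatement, with hypothetical names, of the decision the OOL stack check above encodes: a frame fits only if sp stays at least frame_size above the real stack limit, and a frame at least as large as the configured stack size reaches the overflow stub unconditionally, which also avoids integer overflow in the comparison.

    #include <cstddef>
    #include <cstdint>
    // Sketch only; not the emitted PPC code.
    bool FrameFits(uintptr_t sp, uintptr_t real_stack_limit, size_t frame_size,
                   size_t stack_size_bytes) {
      if (frame_size >= stack_size_bytes) return false;  // unconditional overflow
      return sp >= real_stack_limit + frame_size;
    }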
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -169,14 +243,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
- MovIntToFloat(reg.fp(), scratch);
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch, ip);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
@@ -412,43 +486,124 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
+ lwsync();
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ lwsync();
+ Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
+ sync();
}
+#ifdef V8_TARGET_BIG_ENDIAN
+constexpr bool is_be = true;
+#else
+constexpr bool is_be = false;
+#endif
+
+#define ATOMIC_OP(instr) \
+ { \
+ Register offset = r0; \
+ if (offset_imm != 0) { \
+ mov(ip, Operand(offset_imm)); \
+ if (offset_reg != no_reg) { \
+ add(ip, ip, offset_reg); \
+ } \
+ offset = ip; \
+ } else { \
+ if (offset_reg != no_reg) { \
+ offset = offset_reg; \
+ } \
+ } \
+ \
+ MemOperand dst = MemOperand(offset, dst_addr); \
+ \
+ switch (type.value()) { \
+ case StoreType::kI32Store8: \
+ case StoreType::kI64Store8: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ instr(dst, lhs, rhs); \
+ }; \
+ AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store16: \
+ case StoreType::kI64Store16: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU16(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU16(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store: \
+ case StoreType::kI64Store32: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU32(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU32(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI64Store: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU64(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU64(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
+
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ ATOMIC_OP(add);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ ATOMIC_OP(sub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ ATOMIC_OP(and_);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ ATOMIC_OP(orx);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ ATOMIC_OP(xor_);
}
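
A standalone sketch of why the big-endian paths in ATOMIC_OP byte-reverse around the operation: wasm memory is little-endian, so on a big-endian host the loaded halfword is byte-swapped relative to the wasm value; the op runs in value order and the result is swapped back to memory order before the store.

    #include <cstdint>
    // Sketch only (GCC/Clang builtins), not V8 code.
    uint16_t AtomicAddBigEndian16(uint16_t loaded_memory_order, uint16_t operand) {
      uint16_t value = __builtin_bswap16(loaded_memory_order);  // to value order
      value = static_cast<uint16_t>(value + operand);           // the RMW op
      return __builtin_bswap16(value);                          // back to memory order
    }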
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -594,16 +749,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
switch (kind) {
case kI32:
case kF32:
- LoadU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
- StoreU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ LoadU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ StoreU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
break;
case kI64:
case kOptRef:
case kRef:
case kRtt:
case kF64:
- LoadU64(ip, liftoff::GetStackSlot(dst_offset), r0);
- StoreU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ LoadU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ StoreU64(ip, liftoff::GetStackSlot(dst_offset), r0);
break;
case kS128:
bailout(kSimd, "simd op");
@@ -750,20 +905,25 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
andi(r0, rhs, Operand(31)); \
return r0; \
})
+#define REGISTER_AND_WITH_3F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(63)); \
+ return r0; \
+ })
+
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
#define UNOP_LIST(V) \
- V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
- V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
+ V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
void) \
V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
@@ -772,16 +932,12 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
true, bool) \
V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
true, bool) \
- V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
- ROUND_F64_TO_F32, true, bool) \
V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
- V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
- bool) \
V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
@@ -816,89 +972,89 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
-#define BINOP_LIST(V) \
- V(f32_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , ROUND_F64_TO_F32, , void) \
- V(f64_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , USE, , void) \
- V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
- V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
- V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
- V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
- V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
- V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
- V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+#define BINOP_LIST(V) \
+ V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , ROUND_F64_TO_F32, , void) \
+ V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , USE, , void) \
+ V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
+ V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
@@ -921,61 +1077,331 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
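
A standalone sketch of why kMinInt / -1 is checked explicitly above: the mathematical quotient is 2^31, one past INT32_MAX, so a 32-bit signed division cannot represent it.

    #include <cstdint>
    #include <limits>
    constexpr int64_t kQuotient =
        -static_cast<int64_t>(std::numeric_limits<int32_t>::min());  // 2147483648
    static_assert(kQuotient ==
                      static_cast<int64_t>(std::numeric_limits<int32_t>::max()) + 1,
                  "kMinInt / -1 overflows int32_t by exactly one");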
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont, done, trap_div_unrepresentable;
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ beq(&trap_div_unrepresentable);
+
+  // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+  // kMinInt / -1 case: the remainder is 0.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- bailout(kUnsupportedArchitecture, "emit_type_conversion");
- return true;
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64SConvertI32:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ ZeroExtWord32(dst.gp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ frsp(dst.fp(), src.fp());
+ return true;
+ case kExprF64ConvertF32:
+ fmr(dst.fp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ ConvertIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ ConvertUnsignedIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI32: {
+ ConvertIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ ConvertUnsignedIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI64: {
+ ConvertInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI64: {
+ ConvertUnsignedInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32SConvertI64: {
+ ConvertInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI64: {
+ ConvertUnsignedInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprI32SConvertF64:
+ case kExprI32SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32UConvertF64:
+ case kExprI32UConvertF32: {
+ ConvertDoubleToUnsignedInt64(src.fp(), r0, kScratchDoubleReg,
+ kRoundToZero);
+ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ boverflow(trap, cr7);
+ ZeroExtWord32(dst.gp(), r0);
+ CmpU64(dst.gp(), r0);
+ bne(trap);
+ return true;
+ }
+ case kExprI64SConvertF64:
+ case kExprI64SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI64UConvertF64:
+ case kExprI64UConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32SConvertSatF64:
+ case kExprI32SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32UConvertSatF64:
+ case kExprI32UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwuz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64SConvertSatF64:
+ case kExprI64SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64UConvertSatF64:
+ case kExprI64UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32ReinterpretF32: {
+ MovFloatToInt(dst.gp(), src.fp(), kScratchDoubleReg);
+ return true;
+ }
+ case kExprI64ReinterpretF64: {
+ MovDoubleToInt64(dst.gp(), src.fp());
+ return true;
+ }
+ case kExprF32ReinterpretI32: {
+ MovIntToFloat(dst.fp(), src.gp(), r0);
+ return true;
+ }
+ case kExprF64ReinterpretI64: {
+ MovInt64ToDouble(dst.fp(), src.gp());
+ return true;
+ }
+ default:
+ UNREACHABLE();
+ }
}
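
A scalar sketch of the saturating i32.trunc_sat_f64_s semantics implemented above (reference semantics only, not the emitted PPC code). fctiwz already clamps out-of-range inputs, so only a NaN input needs the explicit zero result.

    #include <cmath>
    #include <cstdint>
    #include <limits>
    int32_t I32TruncSatF64S(double x) {
      if (std::isnan(x)) return 0;
      if (x <= static_cast<double>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();
      if (x >= static_cast<double>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();
      return static_cast<int32_t>(x);
    }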
void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
@@ -1025,8 +1451,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm), r0);
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm), r0);
+ } else {
+ CmpU32(lhs, Operand(imm), r0);
+ }
b(cond, label);
}
@@ -1083,11 +1514,19 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs);
- Label done;
- mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ fcmpu(lhs, rhs, cr0);
+ Label nan, done;
+ bunordered(&nan, cr0);
mov(dst, Operand::Zero());
+ b(NegateCondition(liftoff::ToCondition(liftoff_cond)), &done, cr0);
+ mov(dst, Operand(1));
+ b(&done);
+ bind(&nan);
+ if (liftoff_cond == kUnequal) {
+ mov(dst, Operand(1));
+ } else {
+ mov(dst, Operand::Zero());
+ }
bind(&done);
}
@@ -1114,7 +1553,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj, r0);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target, cr0); // branch if SMI
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
@@ -2254,30 +2695,46 @@ void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- bailout(kUnsupportedArchitecture, "StackCheck");
+ LoadU64(limit_address, MemOperand(limit_address), r0);
+ CmpU64(sp, limit_address);
+ ble(ool_code);
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
+ PrepareCallCFunction(0, 0, ip);
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- bailout(kUnsupportedArchitecture, "AssertUnreachable");
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
@@ -2289,37 +2746,120 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, kSystemPointerSize);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+ // Reserve space in the stack.
+ while (size > kStackPageSize) {
+ SubS64(sp, sp, Operand(kStackPageSize), r0);
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ SubS64(sp, sp, Operand(size), r0);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mr(r3, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, r0);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r3;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ AddS64(sp, sp, Operand(total_size), r0);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- bailout(kUnsupportedArchitecture, "CallIndirect");
+ DCHECK(target != no_reg);
+ Call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
- bailout(kUnsupportedArchitecture, "TailCallIndirect");
+ DCHECK(target != no_reg);
+ Jump(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- bailout(kUnsupportedArchitecture, "CallRuntimeStub");
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ SubS64(sp, sp, Operand(size), r0);
+ mr(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
+ AddS64(sp, sp, Operand(size));
}
void LiftoffAssembler::MaybeOSR() {}
@@ -2329,15 +2869,114 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kF64:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index fef59471c1d..616f10fa8f6 100644
--- a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -79,16 +79,16 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
if (is_uint31(offset_imm)) {
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
if (offset == no_reg) return MemOperand(addr, offset_imm32);
- assm->Add64(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm32);
+ assm->Add64(kScratchReg2, addr, offset);
+ return MemOperand(kScratchReg2, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
- assm->li(kScratchReg, offset_imm);
- assm->Add64(kScratchReg, kScratchReg, addr);
+ assm->li(kScratchReg2, offset_imm);
+ assm->Add64(kScratchReg2, kScratchReg2, addr);
if (offset != no_reg) {
- assm->Add64(kScratchReg, kScratchReg, offset);
+ assm->Add64(kScratchReg2, kScratchReg2, offset);
}
- return MemOperand(kScratchReg, 0);
+ return MemOperand(kScratchReg2, 0);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
@@ -128,10 +128,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst);
+ assm->UStoreFloat(src.fp(), dst, kScratchReg);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst);
+ assm->UStoreDouble(src.fp(), dst, kScratchReg);
break;
default:
UNREACHABLE();
@@ -335,7 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// space if we first allocate the frame and then do the stack check (we will
// need some remaining stack space for throwing the exception). That's why we
// check the available stack space before we allocate the frame. To do this we
- // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // replace the {__ Add64(sp, sp, -frame_size)} with a jump to OOL code that
// does this "extended stack check".
//
// The OOL code can simply be generated here with the normal assembler,
@@ -376,7 +376,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
Add64(sp, sp, Operand(-frame_size));
// Jump back to the start of the function, from {pc_offset()} to
- // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // right after the reserved space for the {__ Add64(sp, sp, -framesize)}
// (which is a Branch now).
int func_start_offset = offset + 2 * kInstrSize;
imm32 = func_start_offset - pc_offset();
@@ -552,11 +552,20 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
TurboAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
break;
+ case LoadType::kS128Load: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
+ if (src_op.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src_op.rm(), src_op.offset());
+ }
+ vl(dst.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -607,11 +616,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
break;
+ case StoreType::kS128Store: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
+ if (dst_op.offset() != 0) {
+ Add64(kScratchReg, dst_op.rm(), dst_op.offset());
+ }
+ vs(src.fp().toV(), dst_reg, 0, VSew::E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -747,24 +765,26 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U:
+ fence(PSR | PSW, PSR | PSW);
lbu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
+ fence(PSR | PSW, PSR | PSW);
lhu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load:
- lr_w(true, true, dst.gp(), src_reg);
- return;
case LoadType::kI64Load32U:
- lr_w(true, true, dst.gp(), src_reg);
- slli(dst.gp(), dst.gp(), 32);
- srli(dst.gp(), dst.gp(), 32);
+ fence(PSR | PSW, PSR | PSW);
+ lw(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI64Load:
- lr_d(true, true, dst.gp(), src_reg);
+ fence(PSR | PSW, PSR | PSW);
+ ld(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
default:
UNREACHABLE();
@@ -780,22 +800,22 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- sync();
+ fence(PSR | PSW, PSW);
sb(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- sync();
+ fence(PSR | PSW, PSW);
sh(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- sc_w(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sw(src.gp(), dst_reg, 0);
return;
case StoreType::kI64Store:
- sc_d(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sd(src.gp(), dst_reg, 0);
return;
default:
UNREACHABLE();
@@ -948,7 +968,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ TurboAssembler::vmv_vv(dst.toV(), dst.toV());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
@@ -971,9 +995,15 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case kS128:
- bailout(kSimd, "Spill S128");
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
+ if (dst.offset() != 0) {
+ Add64(kScratchReg, dst.rm(), dst.offset());
+ }
+ vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
break;
+ }
default:
UNREACHABLE();
}
@@ -1021,6 +1051,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ }
+ vl(reg.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1072,7 +1111,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
@@ -1154,7 +1193,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src);
+ TurboAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1663,7 +1702,37 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "emit_i8x16_shuffle");
+ VRegister dst_v = dst.fp().toV();
+ VRegister lhs_v = lhs.fp().toV();
+ VRegister rhs_v = rhs.fp().toV();
+
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ VU.set(kScratchReg, E8, m1);
+ VRegister temp =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
+ if (dst_v == lhs_v) {
+ vmv_vv(temp, lhs_v);
+ lhs_v = temp;
+ } else if (dst_v == rhs_v) {
+ vmv_vv(temp, rhs_v);
+ rhs_v = temp;
+ }
+ vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
+ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg,
+ -16); // The indices in range [16, 31] select the i - 16-th element
+ // of rhs
+ vrgather_vv(kSimd128ScratchReg2, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -1679,52 +1748,60 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_splat");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_splat");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ fmv_x_w(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ fmv_x_d(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
#define SIMD_BINOP(name1, name2) \
@@ -1756,7 +1833,11 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
@@ -1781,112 +1862,124 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_eq");
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_ne");
+ VU.set(kScratchReg, E32, m1);
+ vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_lt");
+ VU.set(kScratchReg, E32, m1);
+ vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_le");
+ VU.set(kScratchReg, E32, m1);
+ vmfle_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
@@ -1906,7 +1999,10 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4.demote_f64x2_zero");
+ VU.set(kScratchReg, E32, m1);
+ vfncvt_f_f_w(dst.fp().toV(), src.fp().toV());
+ vmv_vi(v0, 12);
+ vmerge_vx(dst.fp().toV(), zero_reg, dst.fp().toV());
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
@@ -1941,69 +2037,102 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kSimd, "emit_s128_const");
+ WasmRvvS128const(dst.fp().toV(), imms);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kSimd, "emit_s128_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_or");
+ VU.set(kScratchReg, E8, m1);
+ vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_xor");
+ VU.set(kScratchReg, E8, m1);
+ vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), rhs.fp().toV());
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), dst.fp().toV());
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kSimd, "emit_s128_select");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(kSimd128ScratchReg, src1.fp().toV(), mask.fp().toV());
+ vnot_vv(kSimd128ScratchReg2, mask.fp().toV());
+ vand_vv(kSimd128ScratchReg2, src2.fp().toV(), kSimd128ScratchReg2);
+ vor_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_neg");
+ VU.set(kScratchReg, E8, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v128_anytrue");
+ VU.set(kScratchReg, E8, m1);
+ Label t;
+ vmv_sx(kSimd128ScratchReg, zero_reg);
+ vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beq(dst.gp(), zero_reg, &t);
+ li(dst.gp(), 1);
+ bind(&t);
}
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_alltrue");
+ VU.set(kScratchReg, E8, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
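Both reductions above follow the same trick: any_true seeds the accumulator with zero and takes an unsigned max across the lanes, all_true seeds it with all ones and takes an unsigned min, and a compare-and-branch then normalizes the scalar result to 0 or 1. A scalar sketch of the two reductions (helper names invented, not part of the patch):

    #include <cstdint>

    // any_true: a nonzero max-reduction means at least one lane is set.
    inline bool AnyTrueI8x16(const uint8_t lanes[16]) {
      uint8_t acc = 0;
      for (int i = 0; i < 16; ++i) acc = lanes[i] > acc ? lanes[i] : acc;
      return acc != 0;
    }

    // all_true: a zero min-reduction means at least one lane is clear.
    inline bool AllTrueI8x16(const uint8_t lanes[16]) {
      uint8_t acc = 0xFF;
      for (int i = 0; i < 16; ++i) acc = lanes[i] < acc ? lanes[i] : acc;
      return acc != 0;
    }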
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_bitmask");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shl");
+ VU.set(kScratchReg, E8, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E8, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
@@ -2030,36 +2159,42 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add");
+ VU.set(kScratchReg, E8, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
@@ -2093,22 +2228,37 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_alltrue");
+ VU.set(kScratchReg, E16, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_bitmask");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shl");
+ VU.set(kScratchReg, E16, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
@@ -2135,7 +2285,8 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add");
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
@@ -2152,7 +2303,8 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub");
+ VU.set(kScratchReg, E16, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
@@ -2203,22 +2355,39 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_alltrue");
+ VU.set(kScratchReg, E32, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_bitmask");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shl");
+ VU.set(kScratchReg, E32, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shli");
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2245,12 +2414,14 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2295,17 +2466,32 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_alltrue");
+ VU.set(kScratchReg, E64, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shl");
+ VU.set(kScratchReg, E64, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shli");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
@@ -2332,12 +2518,14 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2347,12 +2535,14 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_neg");
+ VU.set(kScratchReg, E32, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -2362,13 +2552,13 @@ void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_ceil");
+ Ceil_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_floor");
+ Floor_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2386,32 +2576,53 @@ bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_mul");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfmul_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_div");
+ VU.set(kScratchReg, E32, m1);
+ vfdiv_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_min");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_max");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
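The masked vfmin/vfmax sequences above first build a mask of lanes where both inputs are non-NaN, pre-fill the result with the canonical quiet NaN (0x7FC00000), and compute min/max only on the masked lanes, so any lane with a NaN input comes out as the canonical NaN. A per-lane sketch of f32x4.min under those assumptions (the helper is illustrative and glosses over the ±0.0 ordering that vfmin also resolves; f32x4.max has the same shape with a greater-than pick):

    #include <cmath>
    #include <limits>

    // One f32 lane of the masked-min lowering: NaN in either input yields the
    // canonical quiet NaN, otherwise the ordinary minimum is taken.
    inline float F32x4MinLane(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<float>::quiet_NaN();  // 0x7FC00000 on most targets
      }
      return a < b ? a : b;
    }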
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2426,12 +2637,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_abs");
+ VU.set(kScratchReg, E64, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_neg");
+ VU.set(kScratchReg, E64, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -2441,13 +2654,13 @@ void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_ceil");
+ Ceil_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_floor");
+ Floor_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2465,12 +2678,14 @@ bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2505,22 +2720,34 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vfcvt_x_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vfcvt_xu_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_x_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
@@ -2637,7 +2864,11 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
@@ -2667,7 +2898,9 @@ void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), v31);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
@@ -2692,28 +2925,40 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_replace_lane");
+ VU.set(kScratchReg, E8, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_replace_lane");
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
@@ -2730,9 +2975,9 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
bailout(kSimd, "emit_s128_set_if_nan");
}
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 722b0b074b4..52e8bb683dc 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -122,26 +123,72 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
constexpr int LayInstrSize = 6;
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
- return;
- }
-#endif
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, LayInstrSize + kGap));
- patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ patching_assembler.branchOnCond(al, jump_offset, true, true);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddU64(stack_limit, Operand(frame_size));
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ lay(sp, MemOperand(sp, -frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ jump_offset = offset - pc_offset() + 6;
+ branchOnCond(al, jump_offset, true);
}
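The rewritten PatchPrepareStackFrame above keeps a one-instruction fast path for frames under 4 KB and moves larger frames to out-of-line code that first checks the real stack limit (or traps outright when the frame can never fit in the configured stack) and only then lowers sp before jumping back into the prologue. A rough scalar model of that decision, with invented names and constants (a sketch of the control flow, not the V8 API):

    #include <cstdint>

    constexpr int kKB = 1024;

    // Returns false where the patched prologue would call WasmStackOverflow.
    inline bool TryAllocateLiftoffFrame(uintptr_t* sp, uintptr_t real_stack_limit,
                                        int frame_size, int max_stack_size_kb) {
      if (frame_size < 4 * kKB) {  // fast path: single `lay sp, -frame_size`
        *sp -= frame_size;
        return true;
      }
      // Out-of-line path: frames that can never fit trap unconditionally;
      // otherwise allocate only if the remaining stack still covers the frame.
      if (frame_size >= max_stack_size_kb * kKB ||
          *sp < real_stack_limit + frame_size) {
        return false;
      }
      *sp -= frame_size;
      return true;
    }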
void LiftoffAssembler::FinishCode() {}
@@ -2057,8 +2104,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm));
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm));
+ } else {
+ CmpU32(lhs, Operand(imm));
+ }
b(cond, label);
}
@@ -2143,81 +2195,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_min, F64x2Min) \
- V(f64x2_max, F64x2Max) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_ne, F64x2Ne) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_ne, F32x4Ne) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_mul, I64x2Mul) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_ge_u, I8x16GeU) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name, op) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add, fp) \
+ V(f64x2_sub, F64x2Sub, fp) \
+ V(f64x2_mul, F64x2Mul, fp) \
+ V(f64x2_div, F64x2Div, fp) \
+ V(f64x2_min, F64x2Min, fp) \
+ V(f64x2_max, F64x2Max, fp) \
+ V(f64x2_eq, F64x2Eq, fp) \
+ V(f64x2_ne, F64x2Ne, fp) \
+ V(f64x2_lt, F64x2Lt, fp) \
+ V(f64x2_le, F64x2Le, fp) \
+ V(f32x4_add, F32x4Add, fp) \
+ V(f32x4_sub, F32x4Sub, fp) \
+ V(f32x4_mul, F32x4Mul, fp) \
+ V(f32x4_div, F32x4Div, fp) \
+ V(f32x4_min, F32x4Min, fp) \
+ V(f32x4_max, F32x4Max, fp) \
+ V(f32x4_eq, F32x4Eq, fp) \
+ V(f32x4_ne, F32x4Ne, fp) \
+ V(f32x4_lt, F32x4Lt, fp) \
+ V(f32x4_le, F32x4Le, fp) \
+ V(i64x2_add, I64x2Add, fp) \
+ V(i64x2_sub, I64x2Sub, fp) \
+ V(i64x2_mul, I64x2Mul, fp) \
+ V(i64x2_eq, I64x2Eq, fp) \
+ V(i64x2_ne, I64x2Ne, fp) \
+ V(i64x2_gt_s, I64x2GtS, fp) \
+ V(i64x2_ge_s, I64x2GeS, fp) \
+ V(i64x2_shl, I64x2Shl, gp) \
+ V(i64x2_shr_s, I64x2ShrS, gp) \
+ V(i64x2_shr_u, I64x2ShrU, gp) \
+ V(i32x4_add, I32x4Add, fp) \
+ V(i32x4_sub, I32x4Sub, fp) \
+ V(i32x4_mul, I32x4Mul, fp) \
+ V(i32x4_eq, I32x4Eq, fp) \
+ V(i32x4_ne, I32x4Ne, fp) \
+ V(i32x4_gt_s, I32x4GtS, fp) \
+ V(i32x4_ge_s, I32x4GeS, fp) \
+ V(i32x4_gt_u, I32x4GtU, fp) \
+ V(i32x4_ge_u, I32x4GeU, fp) \
+ V(i32x4_min_s, I32x4MinS, fp) \
+ V(i32x4_min_u, I32x4MinU, fp) \
+ V(i32x4_max_s, I32x4MaxS, fp) \
+ V(i32x4_max_u, I32x4MaxU, fp) \
+ V(i32x4_shl, I32x4Shl, gp) \
+ V(i32x4_shr_s, I32x4ShrS, gp) \
+ V(i32x4_shr_u, I32x4ShrU, gp) \
+ V(i16x8_add, I16x8Add, fp) \
+ V(i16x8_sub, I16x8Sub, fp) \
+ V(i16x8_mul, I16x8Mul, fp) \
+ V(i16x8_eq, I16x8Eq, fp) \
+ V(i16x8_ne, I16x8Ne, fp) \
+ V(i16x8_gt_s, I16x8GtS, fp) \
+ V(i16x8_ge_s, I16x8GeS, fp) \
+ V(i16x8_gt_u, I16x8GtU, fp) \
+ V(i16x8_ge_u, I16x8GeU, fp) \
+ V(i16x8_min_s, I16x8MinS, fp) \
+ V(i16x8_min_u, I16x8MinU, fp) \
+ V(i16x8_max_s, I16x8MaxS, fp) \
+ V(i16x8_max_u, I16x8MaxU, fp) \
+ V(i16x8_shl, I16x8Shl, gp) \
+ V(i16x8_shr_s, I16x8ShrS, gp) \
+ V(i16x8_shr_u, I16x8ShrU, gp) \
+ V(i8x16_add, I8x16Add, fp) \
+ V(i8x16_sub, I8x16Sub, fp) \
+ V(i8x16_eq, I8x16Eq, fp) \
+ V(i8x16_ne, I8x16Ne, fp) \
+ V(i8x16_gt_s, I8x16GtS, fp) \
+ V(i8x16_ge_s, I8x16GeS, fp) \
+ V(i8x16_gt_u, I8x16GtU, fp) \
+ V(i8x16_ge_u, I8x16GeU, fp) \
+ V(i8x16_min_s, I8x16MinS, fp) \
+ V(i8x16_min_u, I8x16MinU, fp) \
+ V(i8x16_max_s, I8x16MaxS, fp) \
+ V(i8x16_max_u, I8x16MaxU, fp) \
+ V(i8x16_shl, I8x16Shl, gp) \
+ V(i8x16_shr_s, I8x16ShrS, gp) \
+ V(i8x16_shr_u, I8x16ShrU, gp)
+
+#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.fp()); \
+ op(dst.fp(), lhs.fp(), rhs.stype()); \
+ }
+SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
+#undef EMIT_SIMD_BINOP_RR
+#undef SIMD_BINOP_RR_LIST
+
+#define SIMD_BINOP_RI_LIST(V) \
+ V(i64x2_shli, I64x2Shl) \
+ V(i64x2_shri_s, I64x2ShrS) \
+ V(i64x2_shri_u, I64x2ShrU) \
+ V(i32x4_shli, I32x4Shl) \
+ V(i32x4_shri_s, I32x4ShrS) \
+ V(i32x4_shri_u, I32x4ShrU) \
+ V(i16x8_shli, I16x8Shl) \
+ V(i16x8_shri_s, I16x8ShrS) \
+ V(i16x8_shri_u, I16x8ShrU) \
+ V(i8x16_shli, I8x16Shl) \
+ V(i8x16_shri_s, I8x16ShrS) \
+ V(i8x16_shri_u, I8x16ShrU)
+
+#define EMIT_SIMD_BINOP_RI(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t rhs) { \
+ op(dst.fp(), lhs.fp(), Operand(rhs)); \
}
-SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
-#undef EMIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
+#undef EMIT_SIMD_BINOP_RI
+#undef SIMD_BINOP_RI_LIST
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2511,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
-void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
-}
-
-void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_u");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_u");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2575,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
-void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
-}
-
-void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_u");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2630,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
-void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
-}
-
-void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_u");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_u");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2727,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
-void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
-}
-
-void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_u");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_u");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3134,14 +3093,40 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (kind == kF32) {
+ cebr(src, src);
+ bunordered(&return_nan);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ cdbr(src, src);
+ bunordered(&return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src, MemOperand(dst), r0);
+ bind(&done);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (lane_kind == kF32) {
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(2));
+ b(Condition(0x5), &return_nan); // If any or all are NaN.
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(3));
+ b(Condition(0x5), &return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src.fp(), MemOperand(dst), r0);
+ bind(&done);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5cda7b3c48..890afa2eda9 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -357,6 +357,14 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, Operand(instance, offset));
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldOperand(instance, offset), tag,
+ isolate_root,
+ IsolateRootLocation::kInScratchRegister);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
@@ -1317,7 +1325,9 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- if (dst.gp() == rhs.gp()) {
+ if (lhs.gp() == rhs.gp()) {
+ xorq(dst.gp(), dst.gp());
+ } else if (dst.gp() == rhs.gp()) {
negq(dst.gp());
addq(dst.gp(), lhs.gp());
} else {
@@ -2181,7 +2191,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
namespace liftoff {
-template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+template <void (SharedTurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
@@ -2335,29 +2345,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm as the one in code-generator-x64.cc.
- assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- // Prepare shift value
- assm->movq(kScratchRegister, rhs.gp());
- // Take shift value modulo 8.
- assm->andq(kScratchRegister, Immediate(7));
- assm->addq(kScratchRegister, Immediate(8));
- assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
- if (is_signed) {
- assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packsswb(dst.fp(), kScratchDoubleReg);
- } else {
- assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packuswb(dst.fp(), kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -2365,7 +2352,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->setcc(not_equal, dst.gp());
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2414,21 +2401,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), dst.fp(), src_op, 0);
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst.fp(), src_op);
- } else {
- movss(dst.fp(), src_op);
- shufps(dst.fp(), dst.fp(), byte{0});
- }
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2440,18 +2417,17 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (mem_type == MachineType::Int8()) {
- Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int16()) {
- Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int32()) {
- Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else {
DCHECK_EQ(MachineType::Int64(), mem_type);
- Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
}
}
@@ -2515,26 +2491,24 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
+ I8x16Popcnt(dst.fp(), src.fp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2753,17 +2727,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+ // 1. AVX or SSE4_2: no requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
kScratchDoubleReg);
@@ -2857,9 +2825,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
memcpy(vals, imms, sizeof(vals));
- TurboAssembler::Move(dst.fp(), vals[0]);
- movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
+ TurboAssembler::Move(dst.fp(), vals[1], vals[0]);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2927,89 +2893,37 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- movq(kScratchRegister, rhs.gp());
- andq(kScratchRegister, Immediate(7));
- addq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psrlw(kScratchDoubleReg, tmp_simd.fp());
- Packuswb(kScratchDoubleReg, kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- subq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psllw(dst.fp(), tmp_simd.fp());
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7; // i.InputInt3(1);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst.fp(), lhs.fp(), byte{shift});
- } else if (dst != lhs) {
- Movaps(dst.fp(), lhs.fp());
- psrlw(dst.fp(), byte{shift});
- }
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3220,14 +3134,13 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- Pmaddubsw(dst.fp(), src.fp(), op);
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3259,7 +3172,7 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3376,14 +3289,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- Pmaddwd(dst.fp(), src.fp(), op);
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), kScratchDoubleReg);
}
namespace liftoff {
@@ -3504,19 +3415,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
@@ -3574,28 +3473,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, static_cast<byte>(1));
- Andps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), static_cast<byte>(1));
- Andps(dst.fp(), src.fp());
- }
+ Absps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Negps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3657,61 +3540,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- minps(kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- minps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Orps(kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- maxps(kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- maxps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3730,28 +3564,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Abspd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllq(kScratchDoubleReg, static_cast<byte>(63));
- Xorpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), static_cast<byte>(63));
- Xorpd(dst.fp(), src.fp());
- }
+ Negpd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -3842,7 +3660,7 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -3852,26 +3670,7 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), kScratchDoubleReg);
- } else {
- movaps(kScratchDoubleReg, src.fp());
- cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(kScratchDoubleReg, dst.fp());
- Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), kScratchDoubleReg);
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4012,12 +3811,14 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4322,11 +4123,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
@@ -4334,11 +4131,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) {
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4376,19 +4169,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
orl(Operand(dst, 0), tmp_gp);
}
diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc
index 5a1ab579e77..e2163510308 100644
--- a/chromium/v8/src/wasm/c-api.cc
+++ b/chromium/v8/src/wasm/c-api.cc
@@ -26,12 +26,13 @@
#include <iostream>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/builtins/builtins.h"
#include "src/compiler/wasm-compiler.h"
#include "src/objects/js-collection-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
@@ -396,6 +397,11 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(engine->platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
return make_own(seal<Engine>(engine));
}
@@ -1945,7 +1951,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::Handle<i::FixedArray> backing_store;
i::Handle<i::WasmTableObject> table_obj = i::WasmTableObject::New(
isolate, i::Handle<i::WasmInstanceObject>(), i_type, minimum, has_maximum,
- maximum, &backing_store);
+ maximum, &backing_store, isolate->factory()->null_value());
if (ref) {
i::Handle<i::JSReceiver> init = impl(ref)->v8_object();
diff --git a/chromium/v8/src/wasm/c-api.h b/chromium/v8/src/wasm/c-api.h
index 0dba237d301..97a8d2d5f6c 100644
--- a/chromium/v8/src/wasm/c-api.h
+++ b/chromium/v8/src/wasm/c-api.h
@@ -9,7 +9,8 @@
#ifndef V8_WASM_C_API_H_
#define V8_WASM_C_API_H_
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "third_party/wasm-api/wasm.hh"
diff --git a/chromium/v8/src/wasm/code-space-access.cc b/chromium/v8/src/wasm/code-space-access.cc
index 0f71c9a2245..83cb5ddea14 100644
--- a/chromium/v8/src/wasm/code-space-access.cc
+++ b/chromium/v8/src/wasm/code-space-access.cc
@@ -12,6 +12,12 @@ namespace internal {
namespace wasm {
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+// The thread-local counter (above) is only valid if each thread works on a
+// single module at a time. This second thread-local checks that assumption.
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
+ nullptr;
+#endif
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
@@ -20,6 +26,12 @@ CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: native_module_(native_module) {
+#ifdef DEBUG
+ if (code_space_write_nesting_level_ == 0) {
+ current_native_module_ = native_module;
+ }
+ DCHECK_EQ(native_module, current_native_module_);
+#endif // DEBUG
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
if (code_space_write_nesting_level_ == 0) SetWritable();
code_space_write_nesting_level_++;
diff --git a/chromium/v8/src/wasm/code-space-access.h b/chromium/v8/src/wasm/code-space-access.h
index 96f852e63bd..788bb8eca37 100644
--- a/chromium/v8/src/wasm/code-space-access.h
+++ b/chromium/v8/src/wasm/code-space-access.h
@@ -55,6 +55,9 @@ class V8_NODISCARD CodeSpaceWriteScope final {
private:
static thread_local int code_space_write_nesting_level_;
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ static thread_local NativeModule* current_native_module_;
+#endif
void SetWritable() const;
void SetExecutable() const;
diff --git a/chromium/v8/src/wasm/compilation-environment.h b/chromium/v8/src/wasm/compilation-environment.h
index 773090c4e5c..574fe25ccac 100644
--- a/chromium/v8/src/wasm/compilation-environment.h
+++ b/chromium/v8/src/wasm/compilation-environment.h
@@ -45,6 +45,8 @@ enum BoundsCheckStrategy : int8_t {
kNoBoundsChecks
};
+enum class DynamicTiering { kEnabled, kDisabled };
+
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
struct CompilationEnv {
@@ -70,10 +72,13 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
+ const DynamicTiering dynamic_tiering;
+
constexpr CompilationEnv(const WasmModule* module,
BoundsCheckStrategy bounds_checks,
RuntimeExceptionSupport runtime_exception_support,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering)
: module(module),
bounds_checks(bounds_checks),
runtime_exception_support(runtime_exception_support),
@@ -88,7 +93,8 @@ struct CompilationEnv {
uintptr_t{module->maximum_pages})
: kV8MaxWasmMemoryPages) *
kWasmPageSize),
- enabled_features(enabled_features) {}
+ enabled_features(enabled_features),
+ dynamic_tiering(dynamic_tiering) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -105,6 +111,7 @@ class WireBytesStorage {
enum class CompilationEvent : uint8_t {
kFinishedBaselineCompilation,
kFinishedExportWrappers,
+ kFinishedCompilationChunk,
kFinishedTopTierCompilation,
kFailedCompilation,
kFinishedRecompilation
@@ -148,6 +155,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void set_compilation_id(int compilation_id);
+ DynamicTiering dynamic_tiering() const;
+
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
@@ -162,7 +171,8 @@ class V8_EXPORT_PRIVATE CompilationState {
// such that it can keep it alive (by regaining a {std::shared_ptr}) in
// certain scopes.
static std::unique_ptr<CompilationState> New(
- const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>);
+ const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>,
+ DynamicTiering dynamic_tiering);
};
} // namespace wasm
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 20c6b30ffcd..3d5ec7f933a 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -397,6 +397,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
"invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
+ if (!VALIDATE(CheckHardwareSupportsSimd())) {
+ DecodeError<validate>(decoder, pc, "Wasm SIMD unsupported");
+ return kWasmBottom;
+ }
return kWasmS128;
}
// Although these codes are included in ValueTypeCode, they technically
@@ -945,6 +949,8 @@ struct ControlBase : public PcForErrors<validate> {
F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(StructNewWithRtt, const StructIndexImmediate<validate>& imm, \
const Value& rtt, const Value args[], Value* result) \
+ F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
+ const Value& rtt, Value* result) \
F(ArrayInit, const ArrayIndexImmediate<validate>& imm, \
const base::Vector<Value>& elements, const Value& rtt, Value* result) \
F(RttCanon, uint32_t type_index, Value* result) \
@@ -1047,8 +1053,6 @@ struct ControlBase : public PcForErrors<validate> {
F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
const Value& value, const Value& count) \
- F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, Value* result) \
F(StructGet, const Value& struct_object, \
const FieldImmediate<validate>& field, bool is_signed, Value* result) \
F(StructSet, const Value& struct_object, \
@@ -1330,11 +1334,10 @@ class WasmDecoder : public Decoder {
}
bool CanReturnCall(const FunctionSig* target_sig) {
- if (target_sig == nullptr) return false;
- size_t num_returns = sig_->return_count();
- if (num_returns != target_sig->return_count()) return false;
- for (size_t i = 0; i < num_returns; ++i) {
- if (sig_->GetReturn(i) != target_sig->GetReturn(i)) return false;
+ if (sig_->return_count() != target_sig->return_count()) return false;
+ auto target_sig_it = target_sig->returns().begin();
+ for (ValueType ret_type : sig_->returns()) {
+ if (!IsSubtypeOf(*target_sig_it++, ret_type, this->module_)) return false;
}
return true;
}
@@ -1849,8 +1852,10 @@ class WasmDecoder : public Decoder {
opcode =
decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt:
- case kExprStructNewDefault: {
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
@@ -1861,8 +1866,10 @@ class WasmDecoder : public Decoder {
FieldImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -1871,6 +1878,13 @@ class WasmDecoder : public Decoder {
ArrayIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
+ IndexImmediate<validate> length_imm(
+ decoder, pc + length + array_imm.length, "array length");
+ return length + array_imm.length + length_imm.length;
+ }
case kExprArrayCopy: {
ArrayIndexImmediate<validate> dst_imm(decoder, pc + length);
ArrayIndexImmediate<validate> src_imm(decoder,
@@ -1887,7 +1901,11 @@ class WasmDecoder : public Decoder {
}
case kExprRttCanon:
case kExprRttSub:
- case kExprRttFreshSub: {
+ case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail: {
IndexImmediate<validate> imm(decoder, pc + length, "type index");
return length + imm.length;
}
@@ -2041,20 +2059,26 @@ class WasmDecoder : public Decoder {
case kGCPrefix: {
opcode = this->read_prefixed_opcode<validate>(pc);
switch (opcode) {
- case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt:
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
+ case kExprArrayNewDefault:
case kExprArrayLen:
case kExprRttSub:
case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail:
return {1, 1};
case kExprStructSet:
return {2, 0};
- case kExprArrayNewDefault:
+ case kExprArrayNew:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -2068,6 +2092,7 @@ class WasmDecoder : public Decoder {
case kExprArrayCopy:
return {5, 0};
case kExprRttCanon:
+ case kExprStructNewDefault:
return {0, 1};
case kExprArrayNewWithRtt:
return {3, 1};
@@ -2076,6 +2101,18 @@ class WasmDecoder : public Decoder {
CHECK(Validate(pc + 2, imm));
return {imm.struct_type->field_count() + 1, 1};
}
+ case kExprStructNew: {
+ StructIndexImmediate<validate> imm(this, pc + 2);
+ CHECK(Validate(pc + 2, imm));
+ return {imm.struct_type->field_count(), 1};
+ }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(this, pc + 2);
+ IndexImmediate<validate> length_imm(this, pc + 2 + array_imm.length,
+ "array length");
+ return {length_imm.index + (opcode == kExprArrayInit ? 1 : 0), 1};
+ }
default:
UNREACHABLE();
}
@@ -2224,6 +2261,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int non_defaultable = 0;
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->enabled_.has_nn_locals() ||
+ this->enabled_.has_unsafe_nn_locals() ||
this->local_type(index).is_defaultable())) {
this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
@@ -2613,9 +2651,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
c->reachability = control_at(1)->innerReachability();
const WasmTagSig* sig = imm.tag->sig;
EnsureStackSpace(static_cast<int>(sig->parameter_count()));
- for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(CreateValue(sig->GetParam(i)));
- }
+ for (ValueType type : sig->parameters()) Push(CreateValue(type));
base::Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
@@ -2634,19 +2670,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
// +1 because the current try block is not included in the count.
- Control* target = control_at(imm.depth + 1);
- if (imm.depth + 1 < control_depth() - 1 && !target->is_try()) {
- this->DecodeError(
- "delegate target must be a try block or the function block");
- return 0;
- }
- if (target->is_try_catch() || target->is_try_catchall()) {
- this->DecodeError(
- "cannot delegate inside the catch handler of the target");
- return 0;
+ uint32_t target_depth = imm.depth + 1;
+ while (target_depth < control_depth() - 1 &&
+ (!control_at(target_depth)->is_try() ||
+ control_at(target_depth)->is_try_catch() ||
+ control_at(target_depth)->is_try_catchall())) {
+ target_depth++;
}
FallThrough();
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, target_depth, c);
current_catch_ = c->previous_catch;
EndControl();
PopControl();
@@ -2692,19 +2724,19 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// the stack as it is.
break;
case kOptRef: {
- Value result = CreateValue(
- ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
- // The result of br_on_null has the same value as the argument (but a
- // non-nullable type).
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- CALL_INTERFACE(Forward, ref_object, &result);
- c->br_merge()->reached = true;
- }
- // In unreachable code, we still have to push a value of the correct
- // type onto the stack.
- Drop(ref_object);
- Push(result);
+ Value result = CreateValue(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ // The result of br_on_null has the same value as the argument (but a
+ // non-nullable type).
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ CALL_INTERFACE(Forward, ref_object, &result);
+ c->br_merge()->reached = true;
+ }
+ // In unreachable code, we still have to push a value of the correct
+ // type onto the stack.
+ Drop(ref_object);
+ Push(result);
break;
}
default:
@@ -3302,7 +3334,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ "tail call type error");
return 0;
}
ArgVector args = PeekArgs(imm.sig);
@@ -3605,8 +3637,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
- int index = count - stack_size() - 1;
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(count, stack_size() - limit);
}
// Silently create unreachable values out of thin air underneath the
// existing stack values. To do so, we have to move existing stack values
@@ -4003,22 +4034,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(imm.struct_type->field_count(), rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, imm.struct_type->field_count());
+ if (opcode == kExprStructNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ imm.struct_type->field_count(), rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
ArgVector args = PeekArgs(imm.struct_type, 1);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4029,8 +4070,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprStructNewDefault: {
- NON_CONST_ONLY
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
@@ -4038,26 +4079,34 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
this->DecodeError(
- "struct.new_default_with_rtt: immediate struct type %d has "
- "field %d of non-defaultable type %s",
- imm.index, i, ftype.name().c_str());
+ "%s: struct type %d has field %d of non-defaultable type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index, i,
+ ftype.name().c_str());
return 0;
}
}
}
- Value rtt = Peek(0, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(0, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(0, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 0);
+ if (opcode == kExprStructNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(0, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 0, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewDefault, imm, rtt, &value);
@@ -4131,23 +4180,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(2);
return opcode_length + field.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, 2);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(2, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(2, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 2);
+ if (opcode == kExprArrayNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(2, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 2, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 1, kWasmI32);
Value initial_value =
@@ -4159,30 +4217,39 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprArrayNewDefault: {
+ case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
this->DecodeError(
- "array.new_default_with_rtt: immediate array type %d has "
- "non-defaultable element type %s",
- imm.index, imm.array_type->element_type().name().c_str());
- return 0;
- }
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
+ "%s: array type %d has non-defaultable element type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index,
+ imm.array_type->element_type().name().c_str());
return 0;
}
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(1, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 1);
+ if (opcode == kExprArrayNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 1, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 0, kWasmI32);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4264,7 +4331,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayCopy: {
NON_CONST_ONLY
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
if (!VALIDATE(dst_imm.array_type->mutability())) {
@@ -4298,12 +4364,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(5);
return opcode_length + dst_imm.length + src_imm.length;
}
- case kExprArrayInit: {
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- if (decoding_mode != kInitExpression) {
- this->DecodeError("array.init is only allowed in init. expressions");
- return 0;
- }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
ArrayIndexImmediate<validate> array_imm(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
@@ -4317,12 +4379,18 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
length_imm.index, kV8MaxWasmArrayInitLength);
return 0;
}
+ Value rtt = opcode == kExprArrayInit
+ ? Peek(0, elem_count, ValueType::Rtt(array_imm.index))
+ : CreateValue(ValueType::Rtt(array_imm.index));
+ if (opcode == kExprArrayInitStatic) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, array_imm.index, &rtt);
+ Push(rtt);
+ }
ValueType element_type = array_imm.array_type->element_type();
std::vector<ValueType> element_types(elem_count,
element_type.Unpacked());
FunctionSig element_sig(0, elem_count, element_types.data());
ArgVector elements = PeekArgs(&element_sig, 1);
- Value rtt = Peek(0, elem_count, ValueType::Rtt(array_imm.index));
Value result =
CreateValue(ValueType::Ref(array_imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayInit, array_imm, elements, rtt,
@@ -4362,14 +4430,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- Value value = CreateValue(ValueType::Rtt(imm.index, 0));
+ Value value = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
Push(value);
return opcode_length + imm.length;
}
case kExprRttFreshSub:
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- V8_FALLTHROUGH;
case kExprRttSub: {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
@@ -4402,16 +4469,29 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
return opcode_length + imm.length;
}
- case kExprRefTest: {
+ case kExprRefTest:
+ case kExprRefTestStatic: {
NON_CONST_ONLY
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- Value rtt = Peek(0, 1);
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefTestStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefTest);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ }
Value obj = Peek(1, 0);
Value value = CreateValue(kWasmI32);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
- }
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4426,6 +4506,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
CALL_INTERFACE(RefTest, obj, rtt, &value);
} else {
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(Drop);
// Unrelated types. Will always fail.
CALL_INTERFACE(I32Const, &value, 0);
}
@@ -4434,14 +4516,27 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprRefCast: {
+ case kExprRefCast:
+ case kExprRefCastStatic: {
NON_CONST_ONLY
- Value rtt = Peek(0, 1);
- Value obj = Peek(1, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
+ Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4480,7 +4575,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprBrOnCast: {
+ case kExprBrOnCast:
+ case kExprBrOnCastStatic: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4488,10 +4584,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4538,7 +4646,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
- case kExprBrOnCastFail: {
+ case kExprBrOnCastFail:
+ case kExprBrOnCastStaticFail: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4546,10 +4655,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStaticFail) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCastFail);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4729,7 +4850,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + branch_depth.length;
}
default:
- this->DecodeError("invalid gc opcode");
+ this->DecodeError("invalid gc opcode: %x", opcode);
return 0;
}
}
@@ -4974,9 +5095,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_INLINE ReturnVector CreateReturnValues(const FunctionSig* sig) {
size_t return_count = sig->return_count();
ReturnVector values(return_count);
- for (size_t i = 0; i < return_count; ++i) {
- values[i] = CreateValue(sig->GetReturn(i));
- }
+ std::transform(sig->returns().begin(), sig->returns().end(), values.begin(),
+ [this](ValueType type) { return CreateValue(type); });
return values;
}
V8_INLINE void PushReturns(ReturnVector values) {
@@ -5001,10 +5121,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
PopTypeError(index, val, ("type " + expected.name()).c_str());
}
- V8_NOINLINE void NotEnoughArgumentsError(int index) {
+ V8_NOINLINE void NotEnoughArgumentsError(int needed, int actual) {
+ DCHECK_LT(0, needed);
+ DCHECK_LE(0, actual);
+ DCHECK_LT(actual, needed);
this->DecodeError(
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ "not enough arguments on the stack for %s (need %d, got %d)",
+ SafeOpcodeNameAt(this->pc_), needed, actual);
}
V8_INLINE Value Peek(int depth, int index, ValueType expected) {
@@ -5023,7 +5146,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Peeking past the current control start in reachable code.
if (!VALIDATE(decoding_mode == kFunctionBody &&
control_.back().unreachable())) {
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(depth + 1, stack_size() - limit);
}
return UnreachableValue(this->pc_);
}
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index 19a862d0d4f..d5a82073d2b 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -63,12 +63,13 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- WasmFeatures no_features = WasmFeatures::None();
+ WasmFeatures unused_detected_features;
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ no_zone, no_module, WasmFeatures::All(), &unused_detected_features,
+ no_sig, pc, end, 0);
return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
@@ -253,8 +254,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
i.pc() + 1, module);
os << " @" << i.pc_offset();
CHECK(decoder.Validate(i.pc() + 1, imm));
- for (uint32_t i = 0; i < imm.out_arity(); i++) {
- os << " " << imm.out_type(i).name();
+ for (uint32_t j = 0; j < imm.out_arity(); j++) {
+ os << " " << imm.out_type(j).name();
}
control_depth++;
break;
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index cd9d941a002..e520a7d6806 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -134,7 +134,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
- env, func_body, func_index_, counters, detected);
+ env, wire_bytes_storage, func_body, func_index_, counters, detected);
result.for_debugging = for_debugging_;
break;
}
@@ -142,30 +142,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
return result;
}
-namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(3, 4)
-void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- base::ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = base::VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
- Handle<AbstractCode>::cast(code), name_str));
-}
-} // namespace
-
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
@@ -243,17 +219,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
- Handle<Code> code;
if (use_generic_wrapper_) {
- code = isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
- } else {
- CompilationJob::Status status = job_->FinalizeJob(isolate_);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- code = job_->compilation_info()->code();
+ return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
- RecordWasmHeapStubCompilation(
- isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
+
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (isolate_->logger()->is_listening_to_code_events() ||
+ isolate_->is_profiling()) {
+ Handle<String> name = isolate_->factory()->NewStringFromAsciiChecked(
+ job_->compilation_info()->GetDebugName().get());
+ PROFILE(isolate_, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code), name));
}
return code;
}
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index 84f34cc0ed8..30775b66ac6 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -109,9 +109,11 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index)
- : builder_(builder), func_index_(func_index) {}
+ WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index, InlinedStatus inlined_status)
+ : builder_(builder),
+ func_index_(func_index),
+ inlined_status_(inlined_status) {}
void StartFunction(FullDecoder* decoder) {
// Get the branch hints map for this function (if available)
@@ -138,7 +140,9 @@ class WasmGraphBuildingInterface {
while (index < num_locals) {
ValueType type = decoder->local_type(index);
TFNode* node;
- if (decoder->enabled_.has_nn_locals() && !type.is_defaultable()) {
+ if ((decoder->enabled_.has_nn_locals() ||
+ decoder->enabled_.has_unsafe_nn_locals()) &&
+ !type.is_defaultable()) {
DCHECK(type.is_reference());
// TODO(jkummerow): Consider using "the hole" instead, to make any
// illegal uses more obvious.
@@ -153,7 +157,9 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm) builder_->TraceFunctionEntry(decoder->position());
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
+ builder_->TraceFunctionEntry(decoder->position());
+ }
}
// Reload the instance cache entries into the Ssa Environment.
@@ -163,7 +169,11 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
+ void FinishFunction(FullDecoder*) {
+ if (inlined_status_ == kRegularFunction) {
+ builder_->PatchInStackCheckIfNeeded();
+ }
+ }
void OnFirstError(FullDecoder*) {}
@@ -185,7 +195,7 @@ class WasmGraphBuildingInterface {
TFNode* loop_node = builder_->Loop(control());
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
uint32_t nesting_depth = 0;
for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
if (decoder->control_at(depth)->is_loop()) {
@@ -295,7 +305,7 @@ class WasmGraphBuildingInterface {
// However, if loop unrolling is enabled, we must create a loop exit and
// wrap the fallthru values on the stack.
if (block->is_loop()) {
- if (FLAG_wasm_loop_unrolling && block->reachable()) {
+ if (emit_loop_exits() && block->reachable()) {
BuildLoopExits(decoder, block);
WrapLocalsAtLoopExit(decoder, block);
uint32_t arity = block->end_merge.arity;
@@ -423,7 +433,7 @@ class WasmGraphBuildingInterface {
void Trap(FullDecoder* decoder, TrapReason reason) {
ValueVector values;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
values);
}
@@ -462,7 +472,7 @@ class WasmGraphBuildingInterface {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
NodeVector values(ret_count);
SsaEnv* internal_env = ssa_env_;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
@@ -475,7 +485,7 @@ class WasmGraphBuildingInterface {
: decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
- if (FLAG_trace_wasm) {
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
@@ -487,7 +497,7 @@ class WasmGraphBuildingInterface {
DoReturn(decoder, drop_values);
} else {
Control* target = decoder->control_at(depth);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -614,55 +624,127 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- enum CallMode { kCallDirect, kCallIndirect, kCallRef };
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
- imm.sig, imm.index, args, returns);
+ DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck,
- Value{nullptr, kWasmBottom}, imm.sig, imm.index, args);
+ DoReturnCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index.node, imm.sig,
- imm.sig_imm.index, args, returns);
+ DoCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args, returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index, imm.sig,
- imm.sig_imm.index, args);
+ DoReturnCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoCall(decoder, kCallRef, 0, null_check, func_ref.node, sig, sig_index,
- args, returns);
+ if (!FLAG_wasm_inlining) {
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ Value* returns_direct =
+ decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallDirect(expected_function_index),
+ decoder->module_->signature(sig_index), args, returns_direct);
+ TFNode* control_direct = control();
+ TFNode* effect_direct = effect();
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ Value* returns_ref = decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns_ref);
+
+ TFNode* control_ref = control();
+ TFNode* effect_ref = effect();
+
+ TFNode* control_args[] = {control_direct, control_ref};
+ TFNode* control = builder_->Merge(2, control_args);
+
+ TFNode* effect_args[] = {effect_direct, effect_ref, control};
+ TFNode* effect = builder_->EffectPhi(2, effect_args);
+
+ ssa_env_->control = control;
+ ssa_env_->effect = effect;
+ builder_->SetEffectControl(effect, control);
+
+ for (uint32_t i = 0; i < sig->return_count(); i++) {
+ TFNode* phi_args[] = {returns_direct[i].node, returns_ref[i].node,
+ control};
+ returns[i].node = builder_->Phi(sig->GetReturn(i), 2, phi_args);
+ }
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kCallRef, 0, null_check, func_ref, sig, sig_index,
+ if (!FLAG_wasm_inlining) {
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ DoReturnCall(decoder, CallInfo::CallDirect(expected_function_index), sig,
+ args);
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)), sig,
args);
}
@@ -795,7 +877,7 @@ class WasmGraphBuildingInterface {
}
DCHECK(decoder->control_at(depth)->is_try());
TryInfo* target_try = decoder->control_at(depth)->try_info;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector stack_values;
BuildNestedLoopExits(decoder, depth, true, stack_values,
&block->try_info->exception);
@@ -922,23 +1004,17 @@ class WasmGraphBuildingInterface {
void StructGet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
result->node = builder_->StructGet(
struct_object.node, field.struct_imm.struct_type, field.field_imm.index,
- null_check, is_signed, decoder->position());
+ NullCheckFor(struct_object.type), is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field,
const Value& field_value) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
- field.field_imm.index, field_value.node, null_check,
- decoder->position());
+ field.field_imm.index, field_value.node,
+ NullCheckFor(struct_object.type), decoder->position());
}
void ArrayNewWithRtt(FullDecoder* decoder,
@@ -967,43 +1043,40 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
bool is_signed, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
- null_check, is_signed, decoder->position());
+ result->node = builder_->ArrayGet(array_obj.node, imm.array_type,
+ index.node, NullCheckFor(array_obj.type),
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
const Value& value) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
- null_check, decoder->position());
+ NullCheckFor(array_obj.type), decoder->position());
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayLen(array_obj.node, null_check, decoder->position());
+ result->node = builder_->ArrayLen(
+ array_obj.node, NullCheckFor(array_obj.type), decoder->position());
}
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
- builder_->ArrayCopy(dst.node, dst_index.node, src.node, src_index.node,
+ builder_->ArrayCopy(dst.node, dst_index.node, NullCheckFor(dst.type),
+ src.node, src_index.node, NullCheckFor(src.type),
length.node, decoder->position());
}
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ NodeVector element_nodes(elements.size());
+ for (uint32_t i = 0; i < elements.size(); i++) {
+ element_nodes[i] = elements[i].node;
+ }
+ result->node = builder_->ArrayInit(imm.index, imm.array_type, rtt.node,
+ VectorOf(element_nodes));
}
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
@@ -1177,6 +1250,7 @@ class WasmGraphBuildingInterface {
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
+ InlinedStatus inlined_status_;
TFNode* effect() { return builder_->effect(); }
@@ -1188,6 +1262,14 @@ class WasmGraphBuildingInterface {
->try_info;
}
+ // Loop exits are only used during loop unrolling and are then removed, as
+ // they cannot be handled by later optimization stages. Since unrolling comes
+ // before inlining in the compilation pipeline, we should not emit loop exits
+ // in inlined functions. Also, we should not do so when unrolling is disabled.
+ bool emit_loop_exits() {
+ return FLAG_wasm_loop_unrolling && inlined_status_ == kRegularFunction;
+ }
+
void GetNodes(TFNode** nodes, Value* values, size_t count) {
for (size_t i = 0; i < count; ++i) {
nodes[i] = values[i].node;
@@ -1255,7 +1337,7 @@ class WasmGraphBuildingInterface {
exception_env->effect = if_exception;
SetEnv(exception_env);
TryInfo* try_info = current_try_info(decoder);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector values;
BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
true, values, &if_exception);
@@ -1267,7 +1349,7 @@ class WasmGraphBuildingInterface {
} else {
DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
try_info->exception = builder_->CreateOrMergeIntoPhi(
- MachineRepresentation::kWord32, try_info->catch_env->control,
+ MachineRepresentation::kTaggedPointer, try_info->catch_env->control,
try_info->exception, if_exception);
}
@@ -1445,36 +1527,102 @@ class WasmGraphBuildingInterface {
return result;
}
- void DoCall(FullDecoder* decoder, CallMode call_mode, uint32_t table_index,
- CheckForNull null_check, TFNode* caller_node,
- const FunctionSig* sig, uint32_t sig_index, const Value args[],
- Value returns[]) {
+ class CallInfo {
+ public:
+ enum CallMode { kCallDirect, kCallIndirect, kCallRef };
+
+ static CallInfo CallDirect(uint32_t callee_index) {
+ return {kCallDirect, callee_index, nullptr, 0,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallIndirect(const Value& index_value, uint32_t table_index,
+ uint32_t sig_index) {
+ return {kCallIndirect, sig_index, &index_value, table_index,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallRef(const Value& funcref_value,
+ CheckForNull null_check) {
+ return {kCallRef, 0, &funcref_value, 0, null_check};
+ }
+
+ CallMode call_mode() { return call_mode_; }
+
+ uint32_t sig_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return callee_or_sig_index_;
+ }
+
+ uint32_t callee_index() {
+ DCHECK_EQ(call_mode_, kCallDirect);
+ return callee_or_sig_index_;
+ }
+
+ CheckForNull null_check() {
+ DCHECK_EQ(call_mode_, kCallRef);
+ return null_check_;
+ }
+
+ const Value* index_or_callee_value() {
+ DCHECK_NE(call_mode_, kCallDirect);
+ return index_or_callee_value_;
+ }
+
+ uint32_t table_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return table_index_;
+ }
+
+ private:
+ CallInfo(CallMode call_mode, uint32_t callee_or_sig_index,
+ const Value* index_or_callee_value, uint32_t table_index,
+ CheckForNull null_check)
+ : call_mode_(call_mode),
+ callee_or_sig_index_(callee_or_sig_index),
+ index_or_callee_value_(index_or_callee_value),
+ table_index_(table_index),
+ null_check_(null_check) {}
+ CallMode call_mode_;
+ uint32_t callee_or_sig_index_;
+ const Value* index_or_callee_value_;
+ uint32_t table_index_;
+ CheckForNull null_check_;
+ };
+
+ void DoCall(FullDecoder* decoder, CallInfo call_info, const FunctionSig* sig,
+ const Value args[], Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
NodeVector arg_nodes(param_count + 1);
base::SmallVector<TFNode*, 1> return_nodes(return_count);
- arg_nodes[0] = caller_node;
+ arg_nodes[0] = (call_info.call_mode() == CallInfo::kCallDirect)
+ ? nullptr
+ : call_info.index_or_callee_value()->node;
+
for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- switch (call_mode) {
- case kCallIndirect:
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
CheckForException(
decoder, builder_->CallIndirect(
- table_index, sig_index, base::VectorOf(arg_nodes),
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes),
base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallDirect:
+ case CallInfo::kCallDirect:
CheckForException(
- decoder, builder_->CallDirect(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes),
- decoder->position()));
+ decoder, builder_->CallDirect(
+ call_info.callee_index(), base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallRef:
+ case CallInfo::kCallRef:
CheckForException(
- decoder, builder_->CallRef(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes), null_check,
- decoder->position()));
+ decoder,
+ builder_->CallRef(sig, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes),
+ call_info.null_check(), decoder->position()));
break;
}
for (size_t i = 0; i < return_count; ++i) {
@@ -1485,18 +1633,23 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- void DoReturnCall(FullDecoder* decoder, CallMode call_mode,
- uint32_t table_index, CheckForNull null_check,
- Value index_or_caller_value, const FunctionSig* sig,
- uint32_t sig_index, const Value args[]) {
+ void DoReturnCall(FullDecoder* decoder, CallInfo call_info,
+ const FunctionSig* sig, const Value args[]) {
size_t arg_count = sig->parameter_count();
ValueVector arg_values(arg_count + 1);
- arg_values[0] = index_or_caller_value;
- for (uint32_t i = 0; i < arg_count; i++) {
- arg_values[i + 1] = args[i];
+ if (call_info.call_mode() == CallInfo::kCallDirect) {
+ arg_values[0].node = nullptr;
+ } else {
+ arg_values[0] = *call_info.index_or_callee_value();
+ // The copy assignment above does not transfer the {node} pointer, so set
+ // it explicitly.
+ arg_values[0].node = call_info.index_or_callee_value()->node;
+ }
+ if (arg_count > 0) {
+ std::memcpy(arg_values.data() + 1, args, arg_count * sizeof(Value));
}
- if (FLAG_wasm_loop_unrolling) {
+
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth(), false,
arg_values);
}
@@ -1504,22 +1657,24 @@ class WasmGraphBuildingInterface {
NodeVector arg_nodes(arg_count + 1);
GetNodes(arg_nodes.data(), base::VectorOf(arg_values));
- switch (call_mode) {
- case kCallIndirect:
- CheckForException(
- decoder, builder_->ReturnCallIndirect(table_index, sig_index,
- base::VectorOf(arg_nodes),
- decoder->position()));
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
+ CheckForException(decoder,
+ builder_->ReturnCallIndirect(
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes), decoder->position()));
break;
- case kCallDirect:
- CheckForException(
- decoder, builder_->ReturnCall(sig_index, base::VectorOf(arg_nodes),
- decoder->position()));
+ case CallInfo::kCallDirect:
+ CheckForException(decoder,
+ builder_->ReturnCall(call_info.callee_index(),
+ base::VectorOf(arg_nodes),
+ decoder->position()));
break;
- case kCallRef:
- CheckForException(decoder, builder_->ReturnCallRef(
- sig_index, base::VectorOf(arg_nodes),
- null_check, decoder->position()));
+ case CallInfo::kCallRef:
+ CheckForException(
+ decoder, builder_->ReturnCallRef(sig, base::VectorOf(arg_nodes),
+ call_info.null_check(),
+ decoder->position()));
break;
}
}
@@ -1547,7 +1702,6 @@ class WasmGraphBuildingInterface {
WRAP_CACHE_FIELD(mem_start);
WRAP_CACHE_FIELD(mem_size);
- WRAP_CACHE_FIELD(mem_mask);
#undef WRAP_CACHE_FIELD
}
}
@@ -1555,7 +1709,7 @@ class WasmGraphBuildingInterface {
void BuildNestedLoopExits(FullDecoder* decoder, uint32_t depth_limit,
bool wrap_exit_values, ValueVector& stack_values,
TFNode** exception_value = nullptr) {
- DCHECK(FLAG_wasm_loop_unrolling);
+ DCHECK(emit_loop_exits());
Control* control = nullptr;
// We are only interested in exits from the innermost loop.
for (uint32_t i = 0; i < depth_limit; i++) {
@@ -1584,7 +1738,7 @@ class WasmGraphBuildingInterface {
}
void TerminateThrow(FullDecoder* decoder) {
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -1597,6 +1751,12 @@ class WasmGraphBuildingInterface {
builder_->TerminateThrow(effect(), control());
}
}
+
+ CheckForNull NullCheckFor(ValueType type) {
+ DCHECK(type.is_object_reference());
+ return type.is_nullable() ? CheckForNull::kWithNullCheck
+ : CheckForNull::kWithoutNullCheck;
+ }
};
} // namespace
@@ -1607,10 +1767,11 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins,
- int func_index) {
+ int func_index, InlinedStatus inlined_status) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder, func_index);
+ &zone, module, enabled, detected, body, builder, func_index,
+ inlined_status);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
@@ -1618,7 +1779,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
- if (FLAG_wasm_loop_unrolling) {
+ if (FLAG_wasm_loop_unrolling && inlined_status == kRegularFunction) {
*loop_infos = decoder.interface().loop_infos();
}
return decoder.toResult(nullptr);
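
Side note on the hunk above: the old flat (call_mode, table_index, null_check, sig_index, ...) parameter lists are folded into a single tagged CallInfo descriptor with named factories, and DoCall/DoReturnCall dispatch on its mode. The following standalone sketch (not V8 code; every name in it is illustrative) shows the same pattern in isolation, so the dispatch is easier to follow.

// Illustrative sketch only: a tagged call descriptor with named factories,
// consumed by a single switch, mirroring the CallInfo refactor above.
#include <cassert>
#include <cstdint>
#include <cstdio>

class CallDescriptor {
 public:
  enum Mode { kDirect, kIndirect, kRef };

  static CallDescriptor Direct(uint32_t callee_index) {
    return {kDirect, callee_index, 0};
  }
  static CallDescriptor Indirect(uint32_t sig_index, uint32_t table_index) {
    return {kIndirect, sig_index, table_index};
  }
  static CallDescriptor Ref() { return {kRef, 0, 0}; }

  Mode mode() const { return mode_; }
  uint32_t callee_index() const { assert(mode_ == kDirect); return index_; }
  uint32_t sig_index() const { assert(mode_ == kIndirect); return index_; }
  uint32_t table_index() const { assert(mode_ == kIndirect); return table_; }

 private:
  CallDescriptor(Mode mode, uint32_t index, uint32_t table)
      : mode_(mode), index_(index), table_(table) {}
  Mode mode_;
  uint32_t index_;  // callee index (direct) or signature index (indirect)
  uint32_t table_;  // only meaningful for indirect calls
};

void EmitCall(const CallDescriptor& call) {
  switch (call.mode()) {
    case CallDescriptor::kDirect:
      std::printf("direct call to function %u\n", call.callee_index());
      break;
    case CallDescriptor::kIndirect:
      std::printf("indirect call: table %u, sig %u\n", call.table_index(),
                  call.sig_index());
      break;
    case CallDescriptor::kRef:
      std::printf("call through a funcref value\n");
      break;
  }
}

int main() {
  EmitCall(CallDescriptor::Direct(0));
  EmitCall(CallDescriptor::Indirect(3, 1));
  EmitCall(CallDescriptor::Ref());
}
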
diff --git a/chromium/v8/src/wasm/graph-builder-interface.h b/chromium/v8/src/wasm/graph-builder-interface.h
index 6c668e2b0a0..49d9dd353cb 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.h
+++ b/chromium/v8/src/wasm/graph-builder-interface.h
@@ -27,12 +27,15 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
+enum InlinedStatus { kInlinedFunction, kRegularFunction };
+
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins, int func_index);
+ compiler::NodeOriginTable* node_origins, int func_index,
+ InlinedStatus inlined_status);
} // namespace wasm
} // namespace internal
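
A note on the new InlinedStatus parameter introduced above: loop exits are only useful when unrolling runs and the body is compiled as a stand-alone function, because inlining happens after unrolling. The tiny standalone sketch below (not V8 code) just restates that gate, which is what replaced the bare FLAG_wasm_loop_unrolling checks.

// Illustrative sketch only: the gate behind emit_loop_exits().
#include <cstdio>

enum InlinedStatus { kInlinedFunction, kRegularFunction };

bool EmitLoopExits(bool loop_unrolling_enabled, InlinedStatus status) {
  // Inlined bodies and builds without unrolling never need loop exit nodes.
  return loop_unrolling_enabled && status == kRegularFunction;
}

int main() {
  std::printf("%d\n", EmitLoopExits(true, kRegularFunction));   // 1
  std::printf("%d\n", EmitLoopExits(true, kInlinedFunction));   // 0
  std::printf("%d\n", EmitLoopExits(false, kRegularFunction));  // 0
}
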
diff --git a/chromium/v8/src/wasm/init-expr-interface.cc b/chromium/v8/src/wasm/init-expr-interface.cc
index 52c45bd18b7..818145d0954 100644
--- a/chromium/v8/src/wasm/init-expr-interface.cc
+++ b/chromium/v8/src/wasm/init-expr-interface.cc
@@ -89,6 +89,48 @@ void InitExprInterface::StructNewWithRtt(
ValueType::Ref(HeapType(imm.index), kNonNullable));
}
+namespace {
+WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
+ switch (type.kind()) {
+ case kI32:
+ case kI8:
+ case kI16:
+ return WasmValue(0);
+ case kI64:
+ return WasmValue(int64_t{0});
+ case kF32:
+ return WasmValue(0.0f);
+ case kF64:
+ return WasmValue(0.0);
+ case kS128:
+ return WasmValue(Simd128());
+ case kOptRef:
+ return WasmValue(isolate->factory()->null_value(), type);
+ case kVoid:
+ case kRtt:
+ case kRttWithDepth:
+ case kRef:
+ case kBottom:
+ UNREACHABLE();
+ }
+}
+} // namespace
+
+void InitExprInterface::StructNewDefault(
+ FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
+ const Value& rtt, Value* result) {
+ if (isolate_ == nullptr) return;
+ std::vector<WasmValue> field_values(imm.struct_type->field_count());
+ for (uint32_t i = 0; i < field_values.size(); i++) {
+ field_values[i] = DefaultValueForType(imm.struct_type->field(i), isolate_);
+ }
+ result->runtime_value =
+ WasmValue(isolate_->factory()->NewWasmStruct(
+ imm.struct_type, field_values.data(),
+ Handle<Map>::cast(rtt.runtime_value.to_ref())),
+ ValueType::Ref(HeapType(imm.index), kNonNullable));
+}
+
void InitExprInterface::ArrayInit(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements,
diff --git a/chromium/v8/src/wasm/jump-table-assembler.cc b/chromium/v8/src/wasm/jump-table-assembler.cc
index db2514791bc..4dc808fe33e 100644
--- a/chromium/v8/src/wasm/jump-table-assembler.cc
+++ b/chromium/v8/src/wasm/jump-table-assembler.cc
@@ -268,6 +268,36 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+#elif V8_TARGET_ARCH_LOONG64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ DCHECK(is_int32(func_index));
+ int start = pc_offset();
+ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr
+ // Jump produces max 4 instructions.
+ Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
+}
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ PatchAndJump(target);
+ return true;
+}
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
diff --git a/chromium/v8/src/wasm/jump-table-assembler.h b/chromium/v8/src/wasm/jump-table-assembler.h
index 3963de9824a..433608decba 100644
--- a/chromium/v8/src/wasm/jump-table-assembler.h
+++ b/chromium/v8/src/wasm/jump-table-assembler.h
@@ -224,6 +224,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kJumpTableLineSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
#else
#error Unknown architecture.
#endif
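
The LOONG64 lazy-compile slot added above reserves a fixed 8-instruction slot and pads whatever the li/Jump sequence does not use with nops. A standalone arithmetic sketch of that padding (not V8 code; the 4-byte instruction size is an assumption about the target):

#include <cstdio>

int main() {
  const int kInstrSize = 4;  // assumed fixed 4-byte instructions
  const int kLazyCompileTableSlotSize = 8 * kInstrSize;
  // li needs at most 2 instructions and Jump at most 4, per the comments above.
  const int emitted_bytes = (2 + 4) * kInstrSize;
  const int nop_bytes = kLazyCompileTableSlotSize - emitted_bytes;
  std::printf("pad with %d nop(s)\n", nop_bytes / kInstrSize);  // 2
}
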
diff --git a/chromium/v8/src/wasm/memory-protection-key.cc b/chromium/v8/src/wasm/memory-protection-key.cc
index 441826e7075..c3e844ff1c4 100644
--- a/chromium/v8/src/wasm/memory-protection-key.cc
+++ b/chromium/v8/src/wasm/memory-protection-key.cc
@@ -166,7 +166,7 @@ bool SetPermissionsAndMemoryProtectionKey(
DISABLE_CFI_ICALL
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions) {
- CHECK_NE(kNoMemoryProtectionKey, key);
+ DCHECK_NE(kNoMemoryProtectionKey, key);
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
typedef int (*pkey_set_t)(int, unsigned int);
@@ -177,8 +177,27 @@ void SetPermissionsForMemoryProtectionKey(
int ret = pkey_set(key, permissions);
CHECK_EQ(0 /* success */, ret);
#else
- // On platforms without PKU support, we should have failed the CHECK above
- // because the key must be {kNoMemoryProtectionKey}.
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
+ UNREACHABLE();
+#endif
+}
+
+DISABLE_CFI_ICALL
+bool MemoryProtectionKeyWritable(int key) {
+ DCHECK_NE(kNoMemoryProtectionKey, key);
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_get_t)(int);
+ static auto* pkey_get = bit_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
+ // If a valid key was allocated, {pkey_get()} must also be available.
+ DCHECK_NOT_NULL(pkey_get);
+
+ int permissions = pkey_get(key);
+ return permissions == kNoRestrictions;
+#else
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
UNREACHABLE();
#endif
}
diff --git a/chromium/v8/src/wasm/memory-protection-key.h b/chromium/v8/src/wasm/memory-protection-key.h
index c4353575679..7a9ba721941 100644
--- a/chromium/v8/src/wasm/memory-protection-key.h
+++ b/chromium/v8/src/wasm/memory-protection-key.h
@@ -82,6 +82,10 @@ bool SetPermissionsAndMemoryProtectionKey(
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions);
+// Returns {true} if the protection key {key} is write-enabled for the current
+// thread.
+bool MemoryProtectionKeyWritable(int key);
+
} // namespace wasm
} // namespace internal
} // namespace v8
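
The new MemoryProtectionKeyWritable(key) helper reports whether the calling thread currently has write access under a PKU key. A standalone, Linux-only sketch of the same query (not V8 code; it resolves pkey_alloc/pkey_get via dlsym like the implementation above and assumes a pkey_get result of 0 means "no restrictions"):

#include <dlfcn.h>
#include <cstdio>

int main() {
  using pkey_alloc_t = int (*)(unsigned int, unsigned int);
  using pkey_get_t = int (*)(int);
  auto pkey_alloc =
      reinterpret_cast<pkey_alloc_t>(dlsym(RTLD_DEFAULT, "pkey_alloc"));
  auto pkey_get =
      reinterpret_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
  if (!pkey_alloc || !pkey_get) {
    std::printf("libc has no PKU wrappers\n");
    return 0;
  }
  int key = pkey_alloc(/*flags=*/0, /*access_rights=*/0);
  if (key < 0) {
    std::printf("no PKU support on this CPU/kernel\n");
    return 0;
  }
  const int kNoRestrictions = 0;  // assumed: no disable bits set
  std::printf("key %d writable for this thread: %s\n", key,
              pkey_get(key) == kNoRestrictions ? "yes" : "no");
}
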
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 303270059de..2611c2d9e9c 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -16,6 +16,7 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
@@ -528,7 +529,8 @@ bool CompilationUnitQueues::Queue::ShouldPublish(
class CompilationStateImpl {
public:
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters);
+ std::shared_ptr<Counters> async_counters,
+ DynamicTiering dynamic_tiering);
~CompilationStateImpl() {
if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
}
@@ -637,6 +639,8 @@ class CompilationStateImpl {
return outstanding_recompilation_functions_ == 0;
}
+ DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
+
Counters* counters() const { return async_counters_.get(); }
void SetWireBytesStorage(
@@ -662,7 +666,7 @@ class CompilationStateImpl {
private:
uint8_t SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* module,
const WasmFeatures& enabled_features, int func_index);
// Returns the potentially-updated {function_progress}.
@@ -701,6 +705,10 @@ class CompilationStateImpl {
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
+ // Cache the dynamic tiering configuration to be consistent for the whole
+ // compilation.
+ const DynamicTiering dynamic_tiering_;
+
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@@ -745,6 +753,9 @@ class CompilationStateImpl {
int outstanding_baseline_units_ = 0;
int outstanding_export_wrappers_ = 0;
int outstanding_top_tier_functions_ = 0;
+ // The amount of generated top tier code since the last
+ // {kFinishedCompilationChunk} event.
+ size_t bytes_since_last_chunk_ = 0;
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
@@ -860,13 +871,17 @@ void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
+DynamicTiering CompilationState::dynamic_tiering() const {
+ return Impl(this)->dynamic_tiering();
+}
+
// static
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(
- reinterpret_cast<CompilationState*>(new CompilationStateImpl(
- std::move(native_module), std::move(async_counters))));
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering) {
+ return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
+ new CompilationStateImpl(std::move(native_module),
+ std::move(async_counters), dynamic_tiering)));
}
// End of PIMPL implementation of {CompilationState}.
@@ -926,13 +941,18 @@ struct ExecutionTierPair {
};
ExecutionTierPair GetRequestedExecutionTiers(
- const WasmModule* module, const WasmFeatures& enabled_features,
+ NativeModule* native_module, const WasmFeatures& enabled_features,
uint32_t func_index) {
+ const WasmModule* module = native_module->module();
ExecutionTierPair result;
result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module);
- if (module->origin != kWasmOrigin || !FLAG_wasm_tier_up) {
+ bool dynamic_tiering =
+ Impl(native_module->compilation_state())->dynamic_tiering() ==
+ DynamicTiering::kEnabled;
+ bool tier_up_enabled = !dynamic_tiering && FLAG_wasm_tier_up;
+ if (module->origin != kWasmOrigin || !tier_up_enabled) {
result.top_tier = result.baseline_tier;
return result;
}
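
The change to GetRequestedExecutionTiers above makes dynamic tiering suppress the eager top-tier request: with dynamic tiering enabled, both tiers start out at the baseline tier and tier-up happens later, driven by execution rather than at module compile time. A standalone sketch of just that decision (not V8 code):

#include <cstdio>

enum class Tier { kLiftoff, kTurbofan };
struct TierPair { Tier baseline; Tier top; };

TierPair RequestedTiers(bool dynamic_tiering, bool flag_wasm_tier_up) {
  TierPair result{Tier::kLiftoff, Tier::kLiftoff};
  // Eager tier-up only happens when dynamic tiering is off.
  if (!dynamic_tiering && flag_wasm_tier_up) result.top = Tier::kTurbofan;
  return result;
}

int main() {
  TierPair eager = RequestedTiers(false, true);
  TierPair dynamic = RequestedTiers(true, true);
  std::printf("eager top tier is Turbofan: %d\n",
              eager.top == Tier::kTurbofan);    // 1
  std::printf("dynamic top tier is Turbofan: %d\n",
              dynamic.top == Tier::kTurbofan);  // 0
}
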
@@ -975,8 +995,7 @@ class CompilationUnitBuilder {
return;
}
ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module_->module(), native_module_->enabled_features(),
- func_index);
+ native_module_, native_module_->enabled_features(), func_index);
// Compile everything for non-debugging initially. If needed, we will tier
// down when the module is fully compiled. Synchronization would be pretty
// difficult otherwise.
@@ -1141,7 +1160,7 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
ExecutionTierPair tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
@@ -1530,13 +1549,13 @@ class CompilationTimeCallback {
native_module_(std::move(native_module)),
compile_mode_(compile_mode) {}
- void operator()(CompilationEvent event) {
+ void operator()(CompilationEvent compilation_event) {
DCHECK(base::TimeTicks::IsHighResolution());
std::shared_ptr<NativeModule> native_module = native_module_.lock();
if (!native_module) return;
auto now = base::TimeTicks::Now();
auto duration = now - start_time_;
- if (event == CompilationEvent::kFinishedBaselineCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedBaselineCompilation) {
// Reset {start_time_} to measure tier-up time.
start_time_ = now;
if (compile_mode_ != kSynchronous) {
@@ -1561,7 +1580,7 @@ class CompilationTimeCallback {
native_module->baseline_compilation_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFinishedTopTierCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedTopTierCompilation) {
TimedHistogram* histogram = async_counters_->wasm_tier_up_module_time();
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
@@ -1573,7 +1592,7 @@ class CompilationTimeCallback {
native_module->tier_up_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFailedCompilation) {
+ if (compilation_event == CompilationEvent::kFailedCompilation) {
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1646,12 +1665,8 @@ void CompileNativeModule(Isolate* isolate,
return;
}
- if (!FLAG_predictable) {
- // For predictable mode, do not finalize wrappers yet to make sure we catch
- // validation errors first.
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
- }
+ compilation_state->FinalizeJSToWasmWrappers(isolate, native_module->module(),
+ export_wrappers_out);
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -1663,9 +1678,6 @@ void CompileNativeModule(Isolate* isolate,
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
isolate->allocator(), thrower, lazy_module);
CHECK(thrower->error());
- } else if (FLAG_predictable) {
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
}
}
@@ -2101,8 +2113,12 @@ class AsyncCompileJob::CompilationStateCallback {
: nullptr);
}
break;
+ case CompilationEvent::kFinishedCompilationChunk:
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_ ||
+ CompilationEvent::kFinishedCompilationChunk == last_event_);
+ break;
case CompilationEvent::kFinishedTopTierCompilation:
- DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_);
// At this point, the job will already be gone, thus do not access it
// here.
break;
@@ -2828,11 +2844,12 @@ bool AsyncStreamingProcessor::Deserialize(
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering)
: native_module_(native_module.get()),
native_module_weak_(std::move(native_module)),
async_counters_(std::move(async_counters)),
- compilation_unit_queues_(native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()),
+ dynamic_tiering_(dynamic_tiering) {}
void CompilationStateImpl::InitCompileJob() {
DCHECK_NULL(compile_job_);
@@ -2865,12 +2882,12 @@ bool CompilationStateImpl::cancelled() const {
}
uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* native_module,
const WasmFeatures& enabled_features, int func_index) {
ExecutionTierPair requested_tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
- CompileStrategy strategy =
- GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
+ CompileStrategy strategy = GetCompileStrategy(
+ native_module->module(), enabled_features, func_index, lazy_module);
bool required_for_baseline = strategy == CompileStrategy::kEager;
bool required_for_top_tier = strategy != CompileStrategy::kLazy;
@@ -2923,7 +2940,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
continue;
}
uint8_t function_progress = SetupCompilationProgressForFunction(
- lazy_module, module, enabled_features, func_index);
+ lazy_module, native_module_, enabled_features, func_index);
compilation_progress_.push_back(function_progress);
}
DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
@@ -3057,7 +3074,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
native_module_->UseLazyStub(func_index);
}
compilation_progress_[declared_function_index(module, func_index)] =
- SetupCompilationProgressForFunction(lazy_module, module,
+ SetupCompilationProgressForFunction(lazy_module, native_module_,
enabled_features, func_index);
}
}
@@ -3197,6 +3214,10 @@ void CompilationStateImpl::CommitTopTierCompilationUnit(
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
+ {
+ base::MutexGuard guard(&callbacks_mutex_);
+ outstanding_top_tier_functions_++;
+ }
compile_job_->NotifyConcurrencyIncrease();
}
@@ -3309,6 +3330,9 @@ void CompilationStateImpl::OnFinishedUnits(
DCHECK_GT(outstanding_baseline_units_, 0);
outstanding_baseline_units_--;
}
+ if (code->tier() == ExecutionTier::kTurbofan) {
+ bytes_since_last_chunk_ += code->instructions().size();
+ }
if (reached_tier < required_top_tier &&
required_top_tier <= code->tier()) {
DCHECK_GT(outstanding_top_tier_functions_, 0);
@@ -3362,12 +3386,19 @@ void CompilationStateImpl::TriggerCallbacks(
triggered_events.Add(CompilationEvent::kFinishedExportWrappers);
if (outstanding_baseline_units_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
- if (outstanding_top_tier_functions_ == 0) {
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_top_tier_functions_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedTopTierCompilation);
}
}
}
+ if (dynamic_tiering_ == DynamicTiering::kEnabled &&
+ static_cast<size_t>(FLAG_wasm_caching_threshold) <
+ bytes_since_last_chunk_) {
+ triggered_events.Add(CompilationEvent::kFinishedCompilationChunk);
+ bytes_since_last_chunk_ = 0;
+ }
if (compile_failed_.load(std::memory_order_relaxed)) {
// *Only* trigger the "failed" event.
triggered_events =
@@ -3378,9 +3409,11 @@ void CompilationStateImpl::TriggerCallbacks(
// Don't trigger past events again.
triggered_events -= finished_events_;
- // Recompilation can happen multiple times, thus do not store this.
- finished_events_ |=
- triggered_events - CompilationEvent::kFinishedRecompilation;
+ // Recompilation can happen multiple times, thus do not store this. There can
+ // also be multiple compilation chunks.
+ finished_events_ |= triggered_events -
+ CompilationEvent::kFinishedRecompilation -
+ CompilationEvent::kFinishedCompilationChunk;
for (auto event :
{std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3391,6 +3424,8 @@ void CompilationStateImpl::TriggerCallbacks(
"wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
"wasm.TopTierFinished"),
+ std::make_pair(CompilationEvent::kFinishedCompilationChunk,
+ "wasm.CompilationChunkFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
"wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
@@ -3401,7 +3436,11 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
- if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
+ // With dynamic tiering, we don't know if we can ever delete the callback.
+ // TODO(https://crbug.com/v8/12289): Release some callbacks also when dynamic
+ // tiering is enabled.
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
outstanding_top_tier_functions_ == 0 &&
outstanding_recompilation_functions_ == 0) {
// Clear the callbacks because no more events will be delivered.
@@ -3665,13 +3704,17 @@ WasmCode* CompileImportWrapper(
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, source_positions, expected_arity);
- std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
- result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone, kNoDebugging);
- WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ WasmCode* published_code;
+ {
+ CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
+ ExecutionTier::kNone, kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
(*cache_scope)[key] = published_code;
published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index e8bd2597bc6..16ac753547c 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -44,9 +44,11 @@ class CompilationResultResolver;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
+class StreamingDecoder;
class WasmCode;
struct WasmModule;
+V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index b014f8a8c7f..8129882ce89 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -550,34 +550,40 @@ class ModuleDecoderImpl : public Decoder {
}
void DecodeTypeSection() {
- uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
- module_->types.reserve(signatures_count);
- for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+ uint32_t types_count = consume_count("types count", kV8MaxWasmTypes);
+ module_->types.reserve(types_count);
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
uint8_t kind = consume_u8("type kind");
switch (kind) {
- case kWasmFunctionTypeCode: {
+ case kWasmFunctionTypeCode:
+ case kWasmFunctionSubtypeCode: {
const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- break;
- }
- case kWasmFunctionExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_signature(super_index)) {
- errorf(pc(), "invalid function supertype index: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmFunctionSubtypeCode) {
+ if (!enabled_features_.has_gc()) {
+ errorf(pc(),
+ "invalid function type definition, enable with "
+ "--experimental-wasm-gc");
+ break;
+ }
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kFunc) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_signature(s, super_index);
break;
}
- case kWasmStructTypeCode: {
+ case kWasmStructTypeCode:
+ case kWasmStructSubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid struct type definition, enable with "
@@ -585,27 +591,26 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmStructSubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
+ }
+ module_->add_struct_type(s, super_index);
// TODO(7748): Should we canonicalize struct types, like
// {signature_map} does for function signatures?
break;
}
- case kWasmStructExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_struct(super_index)) {
- errorf(pc(), "invalid struct supertype: %d", super_index);
- break;
- }
- break;
- }
- case kWasmArrayTypeCode: {
+ case kWasmArrayTypeCode:
+ case kWasmArraySubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid array type definition, enable with "
@@ -613,22 +618,20 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- break;
- }
- case kWasmArrayExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_array(super_index)) {
- errorf(pc(), "invalid array supertype: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmArraySubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_array_type(type, super_index);
break;
}
default:
@@ -636,6 +639,46 @@ class ModuleDecoderImpl : public Decoder {
break;
}
}
+ // Check validity of explicitly defined supertypes.
+ const WasmModule* module = module_.get();
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ uint32_t explicit_super = module_->supertype(i);
+ if (explicit_super == kNoSuperType) continue;
+ if (explicit_super == kGenericSuperType) continue;
+ DCHECK_LT(explicit_super, types_count); // {consume_super_type} checks.
+ // Only types that have an explicit supertype themselves can be explicit
+ // supertypes of other types.
+ if (!module->has_supertype(explicit_super)) {
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ continue;
+ }
+ int depth = GetSubtypingDepth(module, i);
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) {
+ errorf("type %d: subtyping depth is greater than allowed", i);
+ continue;
+ }
+ if (depth == -1) {
+ errorf("type %d: cyclic inheritance", i);
+ continue;
+ }
+ switch (module_->type_kinds[i]) {
+ case kWasmStructTypeCode:
+ if (!module->has_struct(explicit_super)) break;
+ if (!StructIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmArrayTypeCode:
+ if (!module->has_array(explicit_super)) break;
+ if (!ArrayIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmFunctionTypeCode:
+ if (!module->has_signature(explicit_super)) break;
+ if (!FunctionIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ default:
+ UNREACHABLE();
+ }
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ }
module_->signature_map.Freeze();
}
@@ -1106,7 +1149,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
- if (name_type == NameSectionKindCode::kModule) {
+ if (name_type == NameSectionKindCode::kModuleCode) {
WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) {
module_->name = name;
@@ -1784,6 +1827,15 @@ class ModuleDecoderImpl : public Decoder {
return result;
}
+ HeapType consume_super_type() {
+ uint32_t type_length;
+ HeapType result = value_type_reader::read_heap_type<kFullValidation>(
+ this, this->pc(), &type_length, module_.get(),
+ origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
+ consume_bytes(type_length, "supertype");
+ return result;
+ }
+
ValueType consume_storage_type() {
uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
@@ -2360,7 +2412,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionKindCode::kFunction) {
+ if (name_type != NameSectionKindCode::kFunctionCode) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
index f56ab55cd7a..4eb13352d83 100644
--- a/chromium/v8/src/wasm/module-instantiate.cc
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -65,9 +65,10 @@ class CompileImportWrapperJob final : public JobTask {
}
void Run(JobDelegate* delegate) override {
- CodeSpaceWriteScope code_space_write_scope(native_module_);
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
+ // TODO(wasm): Batch code publishing, to avoid repeated locking and
+ // permission switching.
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
key->expected_arity, cache_scope_);
if (delegate->ShouldYield()) return;
@@ -162,6 +163,7 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmStruct::EncodeInstanceSizeInMap(real_instance_size, *map);
return map;
}
@@ -187,9 +189,46 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmArray::EncodeElementSizeInMap(type->element_type().element_size_bytes(),
+ *map);
return map;
}
+void CreateMapForType(Isolate* isolate, const WasmModule* module,
+ int type_index, Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> maps) {
+ // Recursive calls for supertypes may already have created this map.
+ if (maps->get(type_index).IsMap()) return;
+ Handle<Map> rtt_parent;
+ // If the type with {type_index} has an explicit supertype, make sure the
+ // map for that supertype is created first, so that the supertypes list
+ // that's cached on every RTT can be set up correctly.
+ uint32_t supertype = module->supertype(type_index);
+ if (supertype != kNoSuperType && supertype != kGenericSuperType) {
+ // This recursion is safe, because kV8MaxRttSubtypingDepth limits the
+ // number of recursive steps, so we won't overflow the stack.
+ CreateMapForType(isolate, module, supertype, instance, maps);
+ rtt_parent = handle(Map::cast(maps->get(supertype)), isolate);
+ }
+ Handle<Map> map;
+ switch (module->type_kinds[type_index]) {
+ case kWasmStructTypeCode:
+ map = CreateStructMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmArrayTypeCode:
+ map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmFunctionTypeCode:
+ // TODO(7748): Think about canonicalizing rtts to make them work for
+ // identical function types.
+ map = Map::Copy(isolate, isolate->wasm_exported_function_map(),
+ "fresh function map for function type canonical rtt "
+ "initialization");
+ break;
+ }
+ maps->set(type_index, *map);
+}
+
namespace {
// TODO(7748): Consider storing this array in Maps'
@@ -614,9 +653,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
for (int i = module_->num_imported_tables; i < table_count; i++) {
const WasmTable& table = module_->tables[i];
+ // Initialize tables with null for now. We will initialize non-defaultable
+ // tables later, in {InitializeIndirectFunctionTables}.
Handle<WasmTableObject> table_obj = WasmTableObject::New(
isolate_, instance, table.type, table.initial_size,
- table.has_maximum_size, table.maximum_size, nullptr);
+ table.has_maximum_size, table.maximum_size, nullptr,
+ isolate_->factory()->null_value());
tables->set(i, *table_obj);
}
instance->set_tables(*tables);
@@ -657,28 +699,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (enabled_.has_gc()) {
Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->type_kinds.size()));
- for (int map_index = 0;
- map_index < static_cast<int>(module_->type_kinds.size());
- map_index++) {
- Handle<Map> map;
- switch (module_->type_kinds[map_index]) {
- case kWasmStructTypeCode:
- map = CreateStructMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmArrayTypeCode:
- map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmFunctionTypeCode:
- // TODO(7748): Think about canonicalizing rtts to make them work for
- // identical function types.
- map = Map::Copy(isolate_, isolate_->wasm_exported_function_map(),
- "fresh function map for function type canonical rtt "
- "initialization");
- break;
- }
- maps->set(map_index, *map);
+ for (uint32_t index = 0; index < module_->type_kinds.size(); index++) {
+ CreateMapForType(isolate_, module_, index, instance, maps);
}
instance->set_managed_object_maps(*maps);
}
@@ -826,6 +848,39 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
return result;
}
+namespace {
+bool HasDefaultToNumberBehaviour(Isolate* isolate,
+ Handle<JSFunction> function) {
+ // Disallow providing a [Symbol.toPrimitive] member.
+ LookupIterator to_primitive_it{isolate, function,
+ isolate->factory()->to_primitive_symbol()};
+ if (to_primitive_it.state() != LookupIterator::NOT_FOUND) return false;
+
+ // The {valueOf} member must be the default "ObjectPrototypeValueOf".
+ LookupIterator value_of_it{isolate, function,
+ isolate->factory()->valueOf_string()};
+ if (value_of_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> value_of = value_of_it.GetDataValue();
+ if (!value_of->IsJSFunction()) return false;
+ Builtin value_of_builtin_id =
+ Handle<JSFunction>::cast(value_of)->code().builtin_id();
+ if (value_of_builtin_id != Builtin::kObjectPrototypeValueOf) return false;
+
+ // The {toString} member must be the default "FunctionPrototypeToString".
+ LookupIterator to_string_it{isolate, function,
+ isolate->factory()->toString_string()};
+ if (to_string_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> to_string = to_string_it.GetDataValue();
+ if (!to_string->IsJSFunction()) return false;
+ Builtin to_string_builtin_id =
+ Handle<JSFunction>::cast(to_string)->code().builtin_id();
+ if (to_string_builtin_id != Builtin::kFunctionPrototypeToString) return false;
+
+ // Just a default function, which will convert to "NaN". Accept this.
+ return true;
+}
+} // namespace
+
// Look up an import value in the {ffi_} object specifically for linking an
// asm.js module. This only performs non-observable lookups, which allows
// falling back to JavaScript proper (and hence re-executing all lookups) if
@@ -840,7 +895,6 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
// Perform lookup of the given {import_name} without causing any observable
// side-effect. We only accept accesses that resolve to data properties,
// which is indicated by the asm.js spec in section 7 ("Linking") as well.
- Handle<Object> result;
PropertyKey key(isolate_, Handle<Name>::cast(import_name));
LookupIterator it(isolate_, ffi_.ToHandleChecked(), key);
switch (it.state()) {
@@ -854,14 +908,23 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
case LookupIterator::NOT_FOUND:
// Accepting missing properties as undefined does not cause any
// observable difference from JavaScript semantics, we are lenient.
- result = isolate_->factory()->undefined_value();
- break;
- case LookupIterator::DATA:
- result = it.GetDataValue();
- break;
+ return isolate_->factory()->undefined_value();
+ case LookupIterator::DATA: {
+ Handle<Object> value = it.GetDataValue();
+ // For legacy reasons, we accept functions for imported globals (see
+ // {ProcessImportedGlobal}), but only if we can easily determine that
+ // their Number-conversion is side effect free and returns NaN (which is
+ // the case as long as "valueOf" (or others) are not overwritten).
+ if (value->IsJSFunction() &&
+ module_->import_table[index].kind == kExternalGlobal &&
+ !HasDefaultToNumberBehaviour(isolate_,
+ Handle<JSFunction>::cast(value))) {
+ return ReportLinkError("function has special ToNumber behaviour", index,
+ import_name);
+ }
+ return value;
+ }
}
-
- return result;
}
// Load data segments into the memory.
@@ -1035,7 +1098,8 @@ bool InstanceBuilder::ProcessImportedFunction(
if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
NativeModule* native_module = instance->module_object().native_module();
@@ -1336,9 +1400,9 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::zero()) here is what using the observable
- // conversion via {ToPrimitive} would produce as well.
- // TODO(wasm): Still observable if Function.prototype.valueOf or friends
- // are patched, we might need to check for that as well.
+ // conversion via {ToPrimitive} would produce as well. {LookupImportAsm}
+ // checked via {HasDefaultToNumberBehaviour} that "valueOf" or friends have
+ // not been patched.
if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
if (value->IsPrimitive()) {
MaybeHandle<Object> converted = global.type == kWasmI32
@@ -1439,7 +1503,8 @@ void InstanceBuilder::CompileImportWrappers(
compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc
index 22bc7d259a5..c332f3f94af 100644
--- a/chromium/v8/src/wasm/streaming-decoder.cc
+++ b/chromium/v8/src/wasm/streaming-decoder.cc
@@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(base::Vector<const uint8_t> bytes) override;
- void Finish() override;
+ void Finish(bool can_use_compiled_module) override;
void Abort() override;
@@ -258,7 +258,7 @@ size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
return num_bytes;
}
-void AsyncStreamingDecoder::Finish() {
+void AsyncStreamingDecoder::Finish(bool can_use_compiled_module) {
TRACE_STREAMING("Finish\n");
DCHECK(!stream_finished_);
stream_finished_ = true;
@@ -268,9 +268,12 @@ void AsyncStreamingDecoder::Finish() {
base::Vector<const uint8_t> wire_bytes =
base::VectorOf(wire_bytes_for_deserializing_);
// Try to deserialize the module from wire bytes and module bytes.
- if (processor_->Deserialize(compiled_module_bytes_, wire_bytes)) return;
+ if (can_use_compiled_module &&
+ processor_->Deserialize(compiled_module_bytes_, wire_bytes))
+ return;
- // Deserialization failed. Restart decoding using |wire_bytes|.
+ // Either the compiled module bytes were invalidated (can_use_compiled_module
+ // == false) or deserialization failed. Restart decoding using |wire_bytes|.
compiled_module_bytes_ = {};
DCHECK(!deserializing());
OnBytesReceived(wire_bytes);
@@ -312,33 +315,29 @@ void AsyncStreamingDecoder::Abort() {
namespace {
-class TopTierCompiledCallback {
+class CompilationChunkFinishedCallback {
public:
- TopTierCompiledCallback(
+ CompilationChunkFinishedCallback(
std::weak_ptr<NativeModule> native_module,
AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
void operator()(CompilationEvent event) const {
- if (event != CompilationEvent::kFinishedTopTierCompilation) return;
+ if (event != CompilationEvent::kFinishedCompilationChunk &&
+ event != CompilationEvent::kFinishedTopTierCompilation) {
+ return;
+ }
// If the native module is still alive, get back a shared ptr and call the
// callback.
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
callback_(native_module);
}
-#ifdef DEBUG
- DCHECK(!called_);
- called_ = true;
-#endif
}
private:
const std::weak_ptr<NativeModule> native_module_;
const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
-#ifdef DEBUG
- mutable bool called_ = false;
-#endif
};
} // namespace
@@ -347,7 +346,7 @@ void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
- comp_state->AddCallback(TopTierCompiledCallback{
+ comp_state->AddCallback(CompilationChunkFinishedCallback{
std::move(native_module), std::move(module_compiled_callback_)});
module_compiled_callback_ = {};
}
diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h
index 2c5e1eae3c0..6f4601b9f47 100644
--- a/chromium/v8/src/wasm/streaming-decoder.h
+++ b/chromium/v8/src/wasm/streaming-decoder.h
@@ -78,7 +78,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// The buffer passed into OnBytesReceived is owned by the caller.
virtual void OnBytesReceived(base::Vector<const uint8_t> bytes) = 0;
- virtual void Finish() = 0;
+ virtual void Finish(bool can_use_compiled_module = true) = 0;
virtual void Abort() = 0;
@@ -96,6 +96,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
}
// Passes previously compiled module bytes from the embedder's cache.
+ // The content shouldn't be used until Finish(true) is called.
bool SetCompiledModuleBytes(
base::Vector<const uint8_t> compiled_module_bytes) {
compiled_module_bytes_ = compiled_module_bytes;
@@ -124,6 +125,8 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
std::string url_;
ModuleCompiledCallback module_compiled_callback_;
+ // The content of `compiled_module_bytes_` shouldn't be used until
+ // Finish(true) is called.
base::Vector<const uint8_t> compiled_module_bytes_;
};
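
The rename from TopTierCompiledCallback to CompilationChunkFinishedCallback above also changes the contract: the callback is no longer one-shot, which is why the DCHECK(!called_) bookkeeping was removed. A standalone sketch (not V8 code) of a callback that may fire once per finished chunk and again on top-tier completion:

#include <cstdio>
#include <functional>

enum class CompilationEvent {
  kFinishedBaselineCompilation,
  kFinishedCompilationChunk,
  kFinishedTopTierCompilation,
};

struct ChunkFinishedCallback {
  std::function<void()> cache_module;
  void operator()(CompilationEvent event) const {
    if (event != CompilationEvent::kFinishedCompilationChunk &&
        event != CompilationEvent::kFinishedTopTierCompilation) {
      return;
    }
    cache_module();  // May run several times; the embedder re-caches each time.
  }
};

int main() {
  int cached = 0;
  ChunkFinishedCallback cb{[&] { ++cached; }};
  cb(CompilationEvent::kFinishedBaselineCompilation);  // ignored
  cb(CompilationEvent::kFinishedCompilationChunk);     // caches
  cb(CompilationEvent::kFinishedCompilationChunk);     // caches again
  cb(CompilationEvent::kFinishedTopTierCompilation);   // caches again
  std::printf("cached %d times\n", cached);            // 3
}
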
diff --git a/chromium/v8/src/wasm/sync-streaming-decoder.cc b/chromium/v8/src/wasm/sync-streaming-decoder.cc
index 73c22cb5a32..ebe1ead525e 100644
--- a/chromium/v8/src/wasm/sync-streaming-decoder.cc
+++ b/chromium/v8/src/wasm/sync-streaming-decoder.cc
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
buffer_size_ += bytes.size();
}
- void Finish() override {
+ void Finish(bool can_use_compiled_module) override {
// We copy all received chunks into one byte buffer.
auto bytes = std::make_unique<uint8_t[]>(buffer_size_);
uint8_t* destination = bytes.get();
@@ -43,7 +43,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
CHECK_EQ(destination - bytes.get(), buffer_size_);
// Check if we can deserialize the module from cache.
- if (deserializing()) {
+ if (can_use_compiled_module && deserializing()) {
HandleScope scope(isolate_);
SaveAndSwitchContext saved_context(isolate_, *context_);
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index c12496759fc..29482d007b5 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -284,8 +284,8 @@ constexpr bool is_defaultable(ValueKind kind) {
// representation (for reference types), and an inheritance depth (for rtts
// only). Those are encoded into 32 bits using base::BitField. The underlying
// ValueKind enumeration includes four elements which do not strictly correspond
-// to value types: the two packed types i8 and i16, the type of void blocks
-// (stmt), and a bottom value (for internal use).
+// to value types: the two packed types i8 and i16, the void type (for control
+// structures), and a bottom value (for internal use).
class ValueType {
public:
/******************************* Constructors *******************************/
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index d080d1285ed..27687f6e1de 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -191,7 +191,7 @@ std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!has_trap_handler_index());
- if (kind() != WasmCode::kFunction) return;
+ if (kind() != WasmCode::kWasmFunction) return;
if (protected_instructions_size_ == 0) return;
Address base = instruction_start();
@@ -217,6 +217,42 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
isolate->is_profiling();
}
+std::string WasmCode::DebugName() const {
+ if (IsAnonymous()) {
+ return "anonymous function";
+ }
+
+ ModuleWireBytes wire_bytes(native_module()->wire_bytes());
+ const WasmModule* module = native_module()->module();
+ WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
+ WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ std::string name_buffer;
+ if (kind() == kWasmToJsWrapper) {
+ name_buffer = "wasm-to-js:";
+ size_t prefix_len = name_buffer.size();
+ constexpr size_t kMaxSigLength = 128;
+ name_buffer.resize(prefix_len + kMaxSigLength);
+ const FunctionSig* sig = module->functions[index()].sig;
+ size_t sig_length = PrintSignature(
+ base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
+ name_buffer.resize(prefix_len + sig_length);
+ // If the import has a name, also append that (separated by "-").
+ if (!name.empty()) {
+ name_buffer += '-';
+ name_buffer.append(name.begin(), name.size());
+ }
+ } else if (name.empty()) {
+ name_buffer.resize(32);
+ name_buffer.resize(
+ SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
+ "wasm-function[%d]", index()));
+ } else {
+ name_buffer.append(name.begin(), name.end());
+ }
+ return name_buffer;
+}
+
void WasmCode::LogCode(Isolate* isolate, const char* source_url,
int script_id) const {
DCHECK(ShouldBeLogged(isolate));
@@ -224,9 +260,8 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
ModuleWireBytes wire_bytes(native_module_->wire_bytes());
const WasmModule* module = native_module_->module();
- WireBytesRef name_ref =
- module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
- WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ std::string fn_name = DebugName();
+ WasmName name = base::VectorOf(fn_name);
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
@@ -244,37 +279,16 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
- std::string name_buffer;
- if (kind() == kWasmToJsWrapper) {
- name_buffer = "wasm-to-js:";
- size_t prefix_len = name_buffer.size();
- constexpr size_t kMaxSigLength = 128;
- name_buffer.resize(prefix_len + kMaxSigLength);
- const FunctionSig* sig = module->functions[index_].sig;
- size_t sig_length = PrintSignature(
- base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
- name_buffer.resize(prefix_len + sig_length);
- // If the import has a name, also append that (separated by "-").
- if (!name.empty()) {
- name_buffer += '-';
- name_buffer.append(name.begin(), name.size());
- }
- name = base::VectorOf(name_buffer);
- } else if (name.empty()) {
- name_buffer.resize(32);
- name_buffer.resize(
- SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
- "wasm-function[%d]", index()));
- name = base::VectorOf(name_buffer);
+ // Record source positions before adding code, otherwise when code is added,
+ // there are no source positions to associate with the added code.
+ if (!source_positions().empty()) {
+ LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
+ source_positions()));
}
+
int code_offset = module->functions[index_].code.offset();
PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
source_url, code_offset, script_id));
-
- if (!source_positions().empty()) {
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
- source_positions()));
- }
}
void WasmCode::Validate() const {
@@ -331,15 +345,16 @@ void WasmCode::Validate() const {
#endif
}
-void WasmCode::MaybePrint(const char* name) const {
+void WasmCode::MaybePrint() const {
// Determines whether flags want this code to be printed.
bool function_index_matches =
(!IsAnonymous() &&
FLAG_print_wasm_code_function_index == static_cast<int>(index()));
- if (FLAG_print_code ||
- (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
- : FLAG_print_wasm_stub_code)) {
- Print(name);
+ if (FLAG_print_code || (kind() == kWasmFunction
+ ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
+ std::string name = DebugName();
+ Print(name.c_str());
}
}
@@ -361,7 +376,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (name) os << "name: " << name << "\n";
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
- if (kind() == kFunction) {
+ if (kind() == kWasmFunction) {
DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
const char* compiler =
is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
@@ -435,8 +450,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << " registers: ";
uint32_t register_bits = entry.register_bits();
int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
- for (int i = bits - 1; i >= 0; --i) {
- os << ((register_bits >> i) & 1);
+ for (int j = bits - 1; j >= 0; --j) {
+ os << ((register_bits >> j) & 1);
}
}
os << "\n";
@@ -455,7 +470,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
switch (kind) {
- case WasmCode::kFunction:
+ case WasmCode::kWasmFunction:
return "wasm function";
case WasmCode::kWasmToCapiWrapper:
return "wasm-to-capi";
@@ -664,12 +679,13 @@ class CheckWritableMemoryRegions {
DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
[](auto region) { return region.is_empty(); }));
- // Regions are sorted and disjoint.
- std::accumulate(writable_memory_.begin(), writable_memory_.end(),
- Address{0}, [](Address previous_end, auto region) {
- DCHECK_LT(previous_end, region.begin());
- return region.end();
- });
+ // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
+ // so USE is required to prevent build failures in debug builds).
+ USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
+ Address{0}, [](Address previous_end, auto region) {
+ DCHECK_LT(previous_end, region.begin());
+ return region.end();
+ }));
}
private:
@@ -954,6 +970,7 @@ BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
} // namespace
NativeModule::NativeModule(const WasmFeatures& enabled,
+ DynamicTiering dynamic_tiering,
VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
@@ -972,8 +989,8 @@ NativeModule::NativeModule(const WasmFeatures& enabled,
DCHECK_NOT_NULL(shared_this);
DCHECK_NULL(*shared_this);
shared_this->reset(this);
- compilation_state_ =
- CompilationState::New(*shared_this, std::move(async_counters));
+ compilation_state_ = CompilationState::New(
+ *shared_this, std::move(async_counters), dynamic_tiering);
compilation_state_->InitCompileJob();
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
@@ -1032,18 +1049,15 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- for (auto& owned_entry : owned_code_) {
- owned_entry.second->LogCode(isolate, source_url.get(), script.id());
- }
- for (auto& owned_entry : new_owned_code_) {
- owned_entry->LogCode(isolate, source_url.get(), script.id());
+ WasmCodeRefScope code_ref_scope;
+ for (auto& code : SnapshotAllOwnedCode()) {
+ code->LogCode(isolate, source_url.get(), script.id());
}
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), bounds_checks_, kRuntimeExceptionSupport,
- enabled_features_};
+ return {module(), bounds_checks_, kRuntimeExceptionSupport, enabled_features_,
+ compilation_state()->dynamic_tiering()};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
@@ -1116,22 +1130,22 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
std::unique_ptr<WasmCode> new_code{
- new WasmCode{this, // native_module
- kAnonymousFuncIndex, // index
- dst_code_bytes, // instructions
- stack_slots, // stack_slots
- 0, // tagged_parameter_slots
- safepoint_table_offset, // safepoint_table_offset
- handler_table_offset, // handler_table_offset
- constant_pool_offset, // constant_pool_offset
- code_comments_offset, // code_comments_offset
- instructions.length(), // unpadded_binary_size
- {}, // protected_instructions
- reloc_info.as_vector(), // reloc_info
- source_pos.as_vector(), // source positions
- WasmCode::kFunction, // kind
- ExecutionTier::kNone, // tier
- kNoDebugging}}; // for_debugging
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ dst_code_bytes, // instructions
+ stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
+ safepoint_table_offset, // safepoint_table_offset
+ handler_table_offset, // handler_table_offset
+ constant_pool_offset, // constant_pool_offset
+ code_comments_offset, // code_comments_offset
+ instructions.length(), // unpadded_binary_size
+ {}, // protected_instructions
+ reloc_info.as_vector(), // reloc_info
+ source_pos.as_vector(), // source positions
+ WasmCode::kWasmFunction, // kind
+ ExecutionTier::kNone, // tier
+ kNoDebugging}}; // for_debugging
new_code->MaybePrint();
new_code->Validate();
@@ -1179,7 +1193,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
- CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
@@ -1255,6 +1268,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
source_position_table, kind, tier, for_debugging}};
+
code->MaybePrint();
code->Validate();
@@ -1291,7 +1305,7 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
case WasmCompilationResult::kWasmToJsWrapper:
return WasmCode::Kind::kWasmToJsWrapper;
case WasmCompilationResult::kFunction:
- return WasmCode::Kind::kFunction;
+ return WasmCode::Kind::kWasmFunction;
default:
UNREACHABLE();
}
@@ -1429,6 +1443,17 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
return std::vector<WasmCode*>{start, end};
}
+std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+
+ std::vector<WasmCode*> all_code(owned_code_.size());
+ std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
+ [](auto& entry) { return entry.second.get(); });
+ std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
+ return all_code;
+}
+
WasmCode* NativeModule::GetCode(uint32_t index) const {
base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
@@ -1960,7 +1985,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
- if (!BackingStore::ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
// When we start exposing Wasm in jitless mode, then the jitless flag
@@ -1968,10 +1992,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK(!FLAG_jitless);
VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
VirtualMemory::kMapAsJittable);
- if (!mem.IsReserved()) {
- BackingStore::ReleaseReservation(size);
- return {};
- }
+ if (!mem.IsReserved()) return {};
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
mem.end(), mem.size());
@@ -2104,6 +2125,11 @@ void WasmCodeManager::SetThreadWritable(bool writable) {
MemoryProtectionKeyPermission permissions =
writable ? kNoRestrictions : kDisableWrite;
+ // When switching to writable we should not already be writable. Otherwise
+ // this points at a problem with counting writers, or with wrong
+ // initialization (globally or per thread).
+ DCHECK_IMPLIES(writable, !MemoryProtectionKeyWritable());
+
TRACE_HEAP("Setting memory protection key %d to writable: %d.\n",
memory_protection_key_, writable);
SetPermissionsForMemoryProtectionKey(memory_protection_key_, permissions);
@@ -2113,6 +2139,16 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+bool WasmCodeManager::MemoryProtectionKeyWritable() const {
+ return wasm::MemoryProtectionKeyWritable(memory_protection_key_);
+}
+
+void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
+ if (memory_protection_key_ == kNoMemoryProtectionKey) {
+ memory_protection_key_ = AllocateMemoryProtectionKey();
+ }
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
std::shared_ptr<const WasmModule> module) {
@@ -2166,8 +2202,11 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size = code_space.size();
Address end = code_space.end();
std::shared_ptr<NativeModule> ret;
- new NativeModule(enabled, std::move(code_space), std::move(module),
- isolate->async_counters(), &ret);
+ DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
+ new NativeModule(enabled, dynamic_tiering, std::move(code_space),
+ std::move(module), isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
@@ -2397,7 +2436,6 @@ void WasmCodeManager::FreeNativeModule(
#endif // V8_OS_WIN64
lookup_map_.erase(code_space.address());
- BackingStore::ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}
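
The new WasmCode::DebugName() centralizes the naming previously done inline in LogCode(): use the name-section name if present, prefix wasm-to-js wrappers with their printed signature, and otherwise fall back to an index-based name. A reduced standalone sketch of the fallback part (toy function, not the V8 code above):

    #include <cstdio>
    #include <string>

    std::string DebugNameSketch(int func_index,
                                const std::string& name_section_name) {
      // Prefer the name from the module's name section when it exists.
      if (!name_section_name.empty()) return name_section_name;
      // Otherwise synthesize a name from the function index.
      char buf[32];
      int len = std::snprintf(buf, sizeof(buf), "wasm-function[%d]", func_index);
      return std::string(buf, static_cast<size_t>(len));
    }

    int main() {
      std::printf("%s\n", DebugNameSketch(3, "add").c_str());  // "add"
      std::printf("%s\n", DebugNameSketch(7, "").c_str());     // "wasm-function[7]"
    }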
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 2baf46e8886..ad7e4ab26bc 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore8SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore16SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore32SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
@@ -109,7 +117,6 @@ struct WasmModule;
V(WasmAllocateArray_Uninitialized) \
V(WasmAllocateArray_InitNull) \
V(WasmAllocateArray_InitZero) \
- V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
V(WasmAllocateRtt) \
V(WasmAllocateFreshRtt) \
@@ -149,12 +156,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
class V8_EXPORT_PRIVATE WasmCode final {
public:
- enum Kind {
- kFunction,
- kWasmToCapiWrapper,
- kWasmToJsWrapper,
- kJumpTable
- };
+ enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };
// Each runtime stub is identified by an id. This id is used to reference the
// stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
@@ -188,25 +190,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
- static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
- int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+ }
}
}
@@ -289,7 +313,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
- void MaybePrint(const char* name = nullptr) const;
+ void MaybePrint() const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -391,6 +415,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]> ConcatenateBytes(
std::initializer_list<base::Vector<const byte>>);
+ // Tries to get a reasonable name. Lazily looks up the name section, and falls
+ // back to the function index. Return value is guaranteed to not be empty.
+ std::string DebugName() const;
+
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
int trap_handler_index() const {
@@ -520,7 +548,7 @@ class WasmCodeAllocator {
// Make a code region writable. Only allowed if there is at least one writer
// (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- void MakeWritable(base::AddressRegion);
+ V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -637,6 +665,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
+ // Creates a snapshot of all {owned_code_}; this first transfers any newly
+ // added code into {owned_code_}.
+ std::vector<WasmCode*> SnapshotAllOwnedCode() const;
WasmCode* GetCode(uint32_t index) const;
bool HasCode(uint32_t index) const;
@@ -697,7 +728,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LogWasmCodes(Isolate*, Script);
- CompilationState* compilation_state() { return compilation_state_.get(); }
+ CompilationState* compilation_state() const {
+ return compilation_state_.get();
+ }
// Create a {CompilationEnv} object for compilation. The caller has to ensure
// that the {WasmModule} pointer stays valid while the {CompilationEnv} is
@@ -817,7 +850,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
- NativeModule(const WasmFeatures& enabled_features, VirtualMemory code_space,
+ NativeModule(const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this);
@@ -1006,6 +1040,15 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Returns true if there is PKU support, false otherwise.
bool HasMemoryProtectionKeySupport() const;
+ // Returns {true} if the memory protection key is write-enabled for the
+ // current thread.
+ // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
+ bool MemoryProtectionKeyWritable() const;
+
+ // This allocates a memory protection key (if none was allocated before),
+ // independent of the --wasm-memory-protection-keys flag.
+ void InitializeMemoryProtectionKeyForTesting();
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
@@ -1033,7 +1076,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
- const int memory_protection_key_;
+ int memory_protection_key_;
mutable base::Mutex native_modules_mutex_;
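
GetTSANStoreStub() above now dispatches on memory order in addition to store size and FP-save mode, covering the eight new SeqCst stub ids. As a standalone illustration (a toy name builder, not how V8 selects RuntimeStubId values), the chosen stub is fully determined by those three inputs:

    #include <cstdio>
    #include <string>

    std::string TsanStoreStubName(bool seq_cst, int size_in_bytes, bool save_fp) {
      std::string name = seq_cst ? "TSANSeqCstStore" : "TSANRelaxedStore";
      name += std::to_string(size_in_bytes * 8);   // 8, 16, 32 or 64
      name += save_fp ? "SaveFP" : "IgnoreFP";
      return name;
    }

    int main() {
      // 2 memory orders x 4 store sizes x 2 FP modes = 16 store stubs in total.
      std::printf("%s\n", TsanStoreStubName(true, 4, false).c_str());
      // Prints "TSANSeqCstStore32IgnoreFP", matching one of the ids listed above.
    }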
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index 726ceaa0185..5bb12bc863e 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -50,9 +50,9 @@ enum ValueTypeCode : uint8_t {
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
-constexpr uint8_t kWasmFunctionExtendingTypeCode = 0x5d;
-constexpr uint8_t kWasmStructExtendingTypeCode = 0x5c;
-constexpr uint8_t kWasmArrayExtendingTypeCode = 0x5b;
+constexpr uint8_t kWasmFunctionSubtypeCode = 0x5d;
+constexpr uint8_t kWasmStructSubtypeCode = 0x5c;
+constexpr uint8_t kWasmArraySubtypeCode = 0x5b;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -118,19 +118,19 @@ constexpr uint8_t kNoCompilationHint = kMaxUInt8;
// Binary encoding of name section kinds.
enum NameSectionKindCode : uint8_t {
- kModule = 0,
- kFunction = 1,
- kLocal = 2,
+ kModuleCode = 0,
+ kFunctionCode = 1,
+ kLocalCode = 2,
// https://github.com/WebAssembly/extended-name-section/
- kLabel = 3,
- kType = 4,
- kTable = 5,
- kMemory = 6,
- kGlobal = 7,
- kElementSegment = 8,
- kDataSegment = 9,
+ kLabelCode = 3,
+ kTypeCode = 4,
+ kTableCode = 5,
+ kMemoryCode = 6,
+ kGlobalCode = 7,
+ kElementSegmentCode = 8,
+ kDataSegmentCode = 9,
// https://github.com/WebAssembly/gc/issues/193
- kField = 10
+ kFieldCode = 10
};
constexpr size_t kWasmPageSize = 0x10000;
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index 65f05ad507d..a0ecab95964 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -194,7 +194,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!type_names_) {
type_names_ = std::make_unique<NameMap>(DecodeNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kType));
+ native_module_->wire_bytes(), NameSectionKindCode::kTypeCode));
}
return type_names_->GetName(type_index);
}
@@ -203,7 +203,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
local_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kLocal));
+ native_module_->wire_bytes(), NameSectionKindCode::kLocalCode));
}
return local_names_->GetName(func_index, local_index);
}
@@ -212,7 +212,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!field_names_) {
field_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kField));
+ native_module_->wire_bytes(), NameSectionKindCode::kFieldCode));
}
return field_names_->GetName(struct_index, field_index);
}
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 6da33f1ab29..f21e2b76877 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -11,9 +11,11 @@
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
@@ -1034,10 +1036,10 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
for (auto* native_module : info->native_modules) {
DCHECK_EQ(1, native_modules_.count(native_module));
DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
- auto* info = native_modules_[native_module].get();
- info->isolates.erase(isolate);
+ auto* module = native_modules_[native_module].get();
+ module->isolates.erase(isolate);
if (current_gc_info_) {
- for (WasmCode* code : info->potentially_dead_code) {
+ for (WasmCode* code : module->potentially_dead_code) {
current_gc_info_->dead_code.erase(code);
}
}
@@ -1228,9 +1230,9 @@ void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
base::MutexGuard guard(&mutex_);
- auto it = native_modules_.find(native_module);
- DCHECK_NE(native_modules_.end(), it);
- for (Isolate* isolate : it->second->isolates) {
+ auto module = native_modules_.find(native_module);
+ DCHECK_NE(native_modules_.end(), module);
+ for (Isolate* isolate : module->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
@@ -1274,7 +1276,7 @@ void WasmEngine::FreeNativeModule(NativeModule* native_module) {
native_module, current_gc_info_->dead_code.size());
}
native_module_cache_.Erase(native_module);
- native_modules_.erase(it);
+ native_modules_.erase(module);
}
namespace {
@@ -1617,6 +1619,9 @@ WasmCodeManager* GetWasmCodeManager() {
// {max_mem_pages} is declared in wasm-limits.h.
uint32_t max_mem_pages() {
+ static_assert(
+ kV8MaxWasmMemoryPages * kWasmPageSize <= JSArrayBuffer::kMaxByteLength,
+ "Wasm memories must not be bigger than JSArrayBuffers");
STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h
index 72090969111..5cf61ef543d 100644
--- a/chromium/v8/src/wasm/wasm-engine.h
+++ b/chromium/v8/src/wasm/wasm-engine.h
@@ -45,6 +45,7 @@ class GdbServer;
class AsyncCompileJob;
class ErrorThrower;
struct ModuleWireBytes;
+class StreamingDecoder;
class WasmFeatures;
class V8_EXPORT_PRIVATE CompilationResultResolver {
diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc
index 101d5638765..0d8c14a6412 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.cc
+++ b/chromium/v8/src/wasm/wasm-external-refs.cc
@@ -451,7 +451,6 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
return instance.memory_start() + index;
}
@@ -460,19 +459,6 @@ inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
return base + index;
}
-#else
-inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return instance.memory_start() + (index & instance.memory_mask());
-}
-
-inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
- size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1;
- return base + (index & mem_mask);
-}
-#endif
-
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
@@ -551,6 +537,54 @@ int32_t memory_fill_wrapper(Address data) {
return kSuccess;
}
+namespace {
+inline void* ArrayElementAddress(WasmArray array, uint32_t index,
+ int element_size_bytes) {
+ return reinterpret_cast<void*>(array.ptr() + WasmArray::kHeaderSize -
+ kHeapObjectTag + index * element_size_bytes);
+}
+} // namespace
+
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length) {
+ DCHECK_GT(length, 0);
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowGarbageCollection no_gc;
+ WasmArray dst_array = WasmArray::cast(Object(raw_dst_array));
+ WasmArray src_array = WasmArray::cast(Object(raw_src_array));
+
+ bool overlapping_ranges =
+ dst_array.ptr() == src_array.ptr() &&
+ (dst_index < src_index ? dst_index + length > src_index
+ : src_index + length > dst_index);
+ wasm::ValueType element_type = src_array.type()->element_type();
+ if (element_type.is_reference()) {
+ WasmInstanceObject instance =
+ WasmInstanceObject::cast(Object(raw_instance));
+ Isolate* isolate = Isolate::FromRootAddress(instance.isolate_root());
+ ObjectSlot dst_slot = dst_array.ElementSlot(dst_index);
+ ObjectSlot src_slot = src_array.ElementSlot(src_index);
+ if (overlapping_ranges) {
+ isolate->heap()->MoveRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ isolate->heap()->CopyRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ }
+ } else {
+ int element_size_bytes = element_type.element_size_bytes();
+ void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
+ void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
+ size_t copy_size = length * element_size_bytes;
+ if (overlapping_ranges) {
+ MemMove(dst, src, copy_size);
+ } else {
+ MemCopy(dst, src, copy_size);
+ }
+ }
+}
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
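
array_copy_wrapper() above chooses between a memmove-style and a memcpy-style copy based on whether the source and destination ranges overlap within the same array. A self-contained sketch of that overlap test on a plain uint32_t array (illustrative only; the real code also handles reference elements with write barriers):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    void CopyElements(uint32_t* array, uint32_t dst_index, uint32_t src_index,
                      uint32_t length) {
      // Two ranges in the same array overlap iff the lower start plus the
      // length reaches past the higher start.
      bool overlapping = dst_index < src_index ? dst_index + length > src_index
                                               : src_index + length > dst_index;
      size_t bytes = length * sizeof(uint32_t);
      if (overlapping) {
        std::memmove(array + dst_index, array + src_index, bytes);
      } else {
        std::memcpy(array + dst_index, array + src_index, bytes);
      }
    }

    int main() {
      uint32_t a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      CopyElements(a, 1, 3, 4);                    // Overlapping: uses memmove.
      for (uint32_t v : a) std::printf("%u ", v);  // 0 3 4 5 6 5 6 7
      std::printf("\n");
    }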
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index e8363d59367..24d4d35bece 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -111,6 +111,11 @@ int32_t memory_copy_wrapper(Address data);
// zero-extend the result in the return register.
int32_t memory_fill_wrapper(Address data);
+// Assumes copy ranges are in-bounds and length > 0.
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length);
+
using WasmTrapCallbackForTesting = void (*)();
V8_EXPORT_PRIVATE void set_trap_callback_for_testing(
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index 1c4c2acaec3..cf9ef00bf82 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -26,8 +26,12 @@
\
/* Non-specified, V8-only experimental additions to the GC proposal */ \
/* V8 side owner: jkummerow */ \
- V(gc_experiments, "garbage collection V8-only experimental features", false) \
- V(nn_locals, "allow non-defaultable/non-nullable locals", false) \
+ V(nn_locals, \
+ "allow non-defaultable/non-nullable locals, validated with 'until end of " \
+ "block' semantics", \
+ false) \
+ V(unsafe_nn_locals, \
+ "allow non-defaultable/non-nullable locals, no validation", false) \
\
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
@@ -47,7 +51,12 @@
/* Branch Hinting proposal. */ \
/* https://github.com/WebAssembly/branch-hinting */ \
/* V8 side owner: jkummerow */ \
- V(branch_hinting, "branch hinting", false)
+ V(branch_hinting, "branch hinting", false) \
+ \
+ /* Stack Switching proposal. */ \
+ /* https://github.com/WebAssembly/stack-switching */ \
+ /* V8 side owner: thibaudm, fgm */ \
+ V(stack_switching, "stack switching", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -58,18 +67,6 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Exception handling proposal. */ \
- /* https://github.com/WebAssembly/exception-handling */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.9 */ \
- V(eh, "exception handling opcodes", false) \
- \
- /* Reference Types, a.k.a. reftypes proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- V(reftypes, "reference type opcodes", false) \
- \
/* Tail call / return call proposal. */ \
/* https://github.com/webassembly/tail-call */ \
/* V8 side owner: thibaudm */ \
@@ -93,6 +90,13 @@
/* Shipped in v9.1 * */ \
V(simd, "SIMD opcodes", true) \
\
+ /* Reference Types, a.k.a. reftypes proposal. */ \
+ /* https://github.com/WebAssembly/reference-types */ \
+ /* V8 side owner: ahaas */ \
+ /* Staged in v7.8. */ \
+ /* Shipped in v9.6 * */ \
+ V(reftypes, "reference type opcodes", true) \
+ \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
@@ -104,6 +108,13 @@
/* V8 side owner: gdeepti */ \
V(threads, "thread opcodes", true) \
\
+ /* Exception handling proposal. */ \
+ /* https://github.com/WebAssembly/exception-handling */ \
+ /* V8 side owner: thibaudm */ \
+ /* Staged in v8.9 */ \
+ /* Shipped in v9.5 */ \
+ V(eh, "exception handling opcodes", true) \
+ \
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
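
These feature lists are X-macros: each V(name, description, default) entry expands into one flag, and this patch moves 'reftypes' and 'eh' from the staging list to the shipped list (flipping their defaults to true) while adding 'stack_switching' as experimental. A toy, self-contained sketch of the expansion pattern (names reused for illustration only, not the V8 flag machinery):

    #include <cstdio>

    #define FOREACH_TOY_FEATURE(V)                    \
      V(reftypes, "reference type opcodes", true)     \
      V(eh, "exception handling opcodes", true)       \
      V(stack_switching, "stack switching", false)

    // Expand the list once into one boolean per feature.
    #define DECLARE_FEATURE(name, desc, default_value) \
      bool enable_##name = default_value;
    FOREACH_TOY_FEATURE(DECLARE_FEATURE)
    #undef DECLARE_FEATURE

    int main() {
      // Expand the same list again to print every feature and its default.
    #define PRINT_FEATURE(name, desc, default_value) \
      std::printf("%s (%s): default %d\n", #name, desc, enable_##name);
      FOREACH_TOY_FEATURE(PRINT_FEATURE)
    #undef PRINT_FEATURE
    }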
diff --git a/chromium/v8/src/wasm/wasm-init-expr.cc b/chromium/v8/src/wasm/wasm-init-expr.cc
index 14a7e3b6a6f..c6641034ba8 100644
--- a/chromium/v8/src/wasm/wasm-init-expr.cc
+++ b/chromium/v8/src/wasm/wasm-init-expr.cc
@@ -39,7 +39,11 @@ ValueType WasmInitExpr::type(const WasmModule* module,
case kRefNullConst:
return ValueType::Ref(immediate().heap_type, kNullable);
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
case kArrayInit:
+ case kArrayInitStatic:
return ValueType::Ref(immediate().index, kNonNullable);
case kRttCanon:
return ValueType::Rtt(immediate().heap_type, 0);
diff --git a/chromium/v8/src/wasm/wasm-init-expr.h b/chromium/v8/src/wasm/wasm-init-expr.h
index bf68265b2a1..551fce29915 100644
--- a/chromium/v8/src/wasm/wasm-init-expr.h
+++ b/chromium/v8/src/wasm/wasm-init-expr.h
@@ -34,7 +34,11 @@ class WasmInitExpr {
kRefNullConst,
kRefFuncConst,
kStructNewWithRtt,
+ kStructNew,
+ kStructNewDefaultWithRtt,
+ kStructNewDefault,
kArrayInit,
+ kArrayInitStatic,
kRttCanon,
kRttSub,
kRttFreshSub,
@@ -99,6 +103,31 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr StructNew(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNew;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefaultWithRtt(uint32_t index,
+ WasmInitExpr rtt) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefaultWithRtt;
+ expr.immediate_.index = index;
+ expr.operands_.push_back(std::move(rtt));
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefault(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefault;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
static WasmInitExpr ArrayInit(uint32_t index,
std::vector<WasmInitExpr> elements) {
WasmInitExpr expr;
@@ -108,6 +137,15 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr ArrayInitStatic(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kArrayInitStatic;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
static WasmInitExpr RttCanon(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kRttCanon;
@@ -157,6 +195,9 @@ class WasmInitExpr {
case kRefNullConst:
return immediate().heap_type == other.immediate().heap_type;
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
if (immediate().index != other.immediate().index) return false;
DCHECK_EQ(operands().size(), other.operands().size());
for (uint32_t i = 0; i < operands().size(); i++) {
@@ -164,6 +205,7 @@ class WasmInitExpr {
}
return true;
case kArrayInit:
+ case kArrayInitStatic:
if (immediate().index != other.immediate().index) return false;
if (operands().size() != other.operands().size()) return false;
for (uint32_t i = 0; i < operands().size(); i++) {
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index b65db601545..fab66c598dd 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -7,6 +7,8 @@
#include <cinttypes>
#include <cstring>
+#include "include/v8-function.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
@@ -17,12 +19,14 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
#include "src/objects/fixed-array.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
@@ -59,7 +63,9 @@ class WasmStreaming::WasmStreamingImpl {
void OnBytesReceived(const uint8_t* bytes, size_t size) {
streaming_decoder_->OnBytesReceived(base::VectorOf(bytes, size));
}
- void Finish() { streaming_decoder_->Finish(); }
+ void Finish(bool can_use_compiled_module) {
+ streaming_decoder_->Finish(can_use_compiled_module);
+ }
void Abort(MaybeLocal<Value> exception) {
i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate_));
@@ -112,9 +118,9 @@ void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
impl_->OnBytesReceived(bytes, size);
}
-void WasmStreaming::Finish() {
+void WasmStreaming::Finish(bool can_use_compiled_module) {
TRACE_EVENT0("v8.wasm", "wasm.FinishStreaming");
- impl_->Finish();
+ impl_->Finish(can_use_compiled_module);
}
void WasmStreaming::Abort(MaybeLocal<Value> exception) {
@@ -182,9 +188,6 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
}
GET_FIRST_ARGUMENT_AS(Module)
-GET_FIRST_ARGUMENT_AS(Memory)
-GET_FIRST_ARGUMENT_AS(Table)
-GET_FIRST_ARGUMENT_AS(Global)
GET_FIRST_ARGUMENT_AS(Tag)
#undef GET_FIRST_ARGUMENT_AS
@@ -652,6 +655,25 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Boolean::New(isolate, validated));
}
+namespace {
+bool TransferPrototype(i::Isolate* isolate, i::Handle<i::JSObject> destination,
+ i::Handle<i::JSReceiver> source) {
+ i::MaybeHandle<i::HeapObject> maybe_prototype =
+ i::JSObject::GetPrototype(isolate, source);
+ i::Handle<i::HeapObject> prototype;
+ if (maybe_prototype.ToHandle(&prototype)) {
+ Maybe<bool> result = i::JSObject::SetPrototype(destination, prototype,
+ /*from_javascript=*/false,
+ internal::kThrowOnError);
+ if (!result.FromJust()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
+
// new WebAssembly.Module(bytes) -> WebAssembly.Module
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -677,25 +699,38 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- i::MaybeHandle<i::Object> module_obj;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module_obj;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes);
}
- if (module_obj.is_null()) return;
+ i::Handle<i::WasmModuleObject> module_obj;
+ if (!maybe_module_obj.ToHandle(&module_obj)) return;
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {module_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Module} directly, but some
+ // subclass: {module_obj} has {WebAssembly.Module}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, module_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(module_obj)));
}
// WebAssembly.Module.imports(module) -> Array<Import>
@@ -752,37 +787,6 @@ void WebAssemblyModuleCustomSections(
args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
-MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
- Local<Value> module,
- Local<Value> ffi) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- i::MaybeHandle<i::Object> instance_object;
- {
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
-
- // TODO(ahaas): These checks on the module should not be necessary here They
- // are just a workaround for https://crbug.com/837417.
- i::Handle<i::Object> module_obj = Utils::OpenHandle(*module);
- if (!module_obj->IsWasmModuleObject()) {
- thrower.TypeError("Argument 0 must be a WebAssembly.Module object");
- return {};
- }
-
- i::MaybeHandle<i::JSReceiver> maybe_imports =
- GetValueAsImports(ffi, &thrower);
- if (thrower.error()) return {};
-
- instance_object = i::wasm::GetWasmEngine()->SyncInstantiate(
- i_isolate, &thrower, i::Handle<i::WasmModuleObject>::cast(module_obj),
- maybe_imports, i::MaybeHandle<i::JSArrayBuffer>());
- }
-
- DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
- if (instance_object.is_null()) return {};
- return Utils::ToLocal(instance_object.ToHandleChecked());
-}
-
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -793,23 +797,48 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
- if (!args.IsConstructCall()) {
- thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
- return;
- }
+ i::MaybeHandle<i::JSObject> maybe_instance_obj;
+ {
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
+ return;
+ }
- GetFirstArgumentAsModule(args, &thrower);
- if (thrower.error()) return;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module =
+ GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- // We'll check for that in WebAssemblyInstantiateImpl.
- Local<Value> data = args[1];
+ i::Handle<i::WasmModuleObject> module_obj = maybe_module.ToHandleChecked();
+
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(args[1], &thrower);
+ if (thrower.error()) return;
+
+ maybe_instance_obj = i::wasm::GetWasmEngine()->SyncInstantiate(
+ i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
+ }
+
+ i::Handle<i::JSObject> instance_obj;
+ if (!maybe_instance_obj.ToHandle(&instance_obj)) {
+ DCHECK(i_isolate->has_scheduled_exception());
+ return;
+ }
- Local<Value> instance;
- if (WebAssemblyInstantiateImpl(isolate, args[0], data).ToLocal(&instance)) {
- args.GetReturnValue().Set(instance);
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {instance_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Instance} directly, but some
+ // subclass: {instance_obj} has {WebAssembly.Instance}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, instance_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
}
+
+ args.GetReturnValue().Set(Utils::ToLocal(instance_obj));
}
// WebAssembly.instantiateStreaming(Response | Promise<Response> [, imports])
@@ -1030,7 +1059,7 @@ bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
}
// Fetch 'initial' or 'minimum' property from object. If both are provided,
-// 'initial' is used.
+// a TypeError is thrown.
// TODO(aseemgarg): change behavior when the following bug is resolved:
// https://github.com/WebAssembly/js-types/issues/6
bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -1043,13 +1072,27 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
result, lower_bound, upper_bound)) {
return false;
}
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
- if (!has_initial && enabled_features.has_type_reflection()) {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(
+ reinterpret_cast<i::Isolate*>(isolate));
+ if (enabled_features.has_type_reflection()) {
+ bool has_minimum = false;
+ int64_t minimum = 0;
if (!GetOptionalIntegerProperty(isolate, thrower, context, object,
- v8_str(isolate, "minimum"), &has_initial,
- result, lower_bound, upper_bound)) {
+ v8_str(isolate, "minimum"), &has_minimum,
+ &minimum, lower_bound, upper_bound)) {
return false;
}
+ if (has_initial && has_minimum) {
+ thrower->TypeError(
+ "The properties 'initial' and 'minimum' are not allowed at the same "
+ "time");
+ return false;
+ }
+ if (has_minimum) {
+ // Only {minimum} exists, so we use {minimum} as {initial}.
+ has_initial = true;
+ *result = minimum;
+ }
}
if (!has_initial) {
// TODO(aseemgarg): update error message when the spec issue is resolved.
@@ -1059,6 +1102,19 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return true;
}
+namespace {
+i::Handle<i::Object> DefaultReferenceValue(i::Isolate* isolate,
+ i::wasm::ValueType type) {
+ if (type == i::wasm::kWasmFuncRef) {
+ return isolate->factory()->null_value();
+ }
+ if (type.is_reference()) {
+ return isolate->factory()->undefined_value();
+ }
+ UNREACHABLE();
+}
+} // namespace
+
// new WebAssembly.Table(args) -> WebAssembly.Table
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1084,7 +1140,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
// The JS api uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
@@ -1115,12 +1171,38 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
+ i::Handle<i::WasmTableObject> table_obj =
i::WasmTableObject::New(i_isolate, i::Handle<i::WasmInstanceObject>(),
type, static_cast<uint32_t>(initial), has_maximum,
- static_cast<uint32_t>(maximum), &fixed_array);
+ static_cast<uint32_t>(maximum), &fixed_array,
+ DefaultReferenceValue(i_isolate, type));
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {table_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Table} directly, but some
+ // subclass: {table_obj} has {WebAssembly.Table}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, table_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
+ if (initial > 0 && args.Length() >= 2 && !args[1]->IsUndefined()) {
+ i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ if (!i::WasmTableObject::IsValidElement(i_isolate, table_obj, element)) {
+ thrower.TypeError(
+ "Argument 2 must be undefined, null, or a value of type compatible "
+ "with the type of the new table.");
+ return;
+ }
+ for (uint32_t index = 0; index < static_cast<uint32_t>(initial); ++index) {
+ i::WasmTableObject::Set(i_isolate, table_obj, index, element);
+ }
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(table_obj));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(table_obj)));
}
void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1183,6 +1265,19 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("could not allocate memory");
return;
}
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {memory_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Memory} directly, but some
+ // subclass: {memory_obj} has {WebAssembly.Memory}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, memory_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
if (shared == i::SharedFlag::kShared) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
@@ -1336,6 +1431,18 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {global_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Global} directly, but some
+ // subclass: {global_obj} has {WebAssembly.Global}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, global_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
// Convert value to a WebAssembly value, the default value is 0.
Local<v8::Value> value = Local<Value>::Cast(args[1]);
switch (type.kind()) {
@@ -1578,7 +1685,6 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::kBottom:
case i::wasm::kS128:
UNREACHABLE();
- break;
}
}
}
@@ -1821,16 +1927,16 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> init_value = i_isolate->factory()->null_value();
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- if (enabled_features.has_typed_funcref()) {
- if (args.Length() >= 2 && !args[1]->IsUndefined()) {
- init_value = Utils::OpenHandle(*args[1]);
- }
+ i::Handle<i::Object> init_value;
+
+ if (args.Length() >= 2 && !args[1]->IsUndefined()) {
+ init_value = Utils::OpenHandle(*args[1]);
if (!i::WasmTableObject::IsValidElement(i_isolate, receiver, init_value)) {
thrower.TypeError("Argument 1 must be a valid type for the table");
return;
}
+ } else {
+ init_value = DefaultReferenceValue(i_isolate, receiver->type());
}
int old_size =
@@ -1888,7 +1994,12 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ i::Handle<i::Object> element;
+ if (args.Length() >= 2) {
+ element = Utils::OpenHandle(*args[1]);
+ } else {
+ element = DefaultReferenceValue(i_isolate, table_object->type());
+ }
if (!i::WasmTableObject::IsValidElement(i_isolate, table_object, element)) {
thrower.TypeError(
"Argument 1 must be null or a WebAssembly function of type compatible "
@@ -1898,16 +2009,14 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::WasmTableObject::Set(i_isolate, table_object, index, element);
}
-// WebAssembly.Table.type(WebAssembly.Table) -> TableType
+// WebAssembly.Table.type() -> TableType
void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.type()");
- auto maybe_table = GetFirstArgumentAsTable(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmTableObject> table = maybe_table.ToHandleChecked();
+ EXTRACT_THIS(table, WasmTableObject);
base::Optional<uint32_t> max_size;
if (!table->maximum_length().IsUndefined()) {
uint64_t max_size64 = table->maximum_length().Number();
@@ -1980,16 +2089,14 @@ void WebAssemblyMemoryGetBuffer(
return_value.Set(Utils::ToLocal(buffer));
}
-// WebAssembly.Memory.type(WebAssembly.Memory) -> MemoryType
+// WebAssembly.Memory.type() -> MemoryType
void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.type()");
- auto maybe_memory = GetFirstArgumentAsMemory(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
+ EXTRACT_THIS(memory, WasmMemoryObject);
i::Handle<i::JSArrayBuffer> buffer(memory->array_buffer(), i_isolate);
size_t curr_size = buffer->byte_length() / i::wasm::kWasmPageSize;
DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
@@ -2000,7 +2107,8 @@ void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
max_size.emplace(static_cast<uint32_t>(max_size64));
}
- auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size);
+ bool shared = buffer->is_shared();
+ auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size, shared);
args.GetReturnValue().Set(Utils::ToLocal(type));
}
@@ -2346,16 +2454,14 @@ void WebAssemblyGlobalSetValue(
}
}
-// WebAssembly.Global.type(WebAssembly.Global) -> GlobalType
+// WebAssembly.Global.type() -> GlobalType
void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Global.type()");
- auto maybe_global = GetFirstArgumentAsGlobal(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmGlobalObject> global = maybe_global.ToHandleChecked();
+ EXTRACT_THIS(global, WasmGlobalObject);
auto type = i::wasm::GetTypeForGlobal(i_isolate, global->is_mutable(),
global->type());
args.GetReturnValue().Set(Utils::ToLocal(type));
@@ -2580,7 +2686,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
+ InstallFunc(isolate, table_proto, "type", WebAssemblyTableType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
@@ -2600,7 +2707,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryType, 1);
+ InstallFunc(isolate, memory_proto, "type", WebAssemblyMemoryType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
@@ -2622,7 +2730,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalType, 1);
+ InstallFunc(isolate, global_proto, "type", WebAssemblyGlobalType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h
index b7806af797f..fa7784e724c 100644
--- a/chromium/v8/src/wasm/wasm-limits.h
+++ b/chromium/v8/src/wasm/wasm-limits.h
@@ -40,7 +40,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
// Also, do not use this limit to validate declared memory, use
// kSpecMaxMemoryPages for that.
constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
- ? 32768 // = 2 GiB
+ ? 32767 // = 2 GiB - 64 KiB
: 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -58,9 +58,6 @@ constexpr size_t kV8MaxWasmMemories = 1;
// GC proposal. These limits are not standardized yet.
constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
-// Maximum supported by implementation: ((1<<27)-3).
-// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
constexpr size_t kV8MaxWasmArrayInitLength = 999;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
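For reference, the page arithmetic behind the lowered 32-bit limit above; reading the motivation as keeping the maximum byte count strictly below 2 GiB is an assumption, not stated in the patch. A minimal standalone check:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kWasmPageSize = 64 * 1024;  // 64 KiB per wasm page
constexpr uint64_t kOldPages = 32768;          // previous 32-bit limit
constexpr uint64_t kNewPages = 32767;          // new 32-bit limit

static_assert(kOldPages * kWasmPageSize == (1ull << 31),
              "32768 pages is exactly 2 GiB");
static_assert(kNewPages * kWasmPageSize == (1ull << 31) - kWasmPageSize,
              "32767 pages stays one page below 2 GiB");

int main() {
  std::printf("old limit: %llu bytes, new limit: %llu bytes\n",
              static_cast<unsigned long long>(kOldPages * kWasmPageSize),
              static_cast<unsigned long long>(kNewPages * kWasmPageSize));
  return 0;
}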
diff --git a/chromium/v8/src/wasm/wasm-linkage.h b/chromium/v8/src/wasm/wasm-linkage.h
index 2d980555192..ecf59f9ed56 100644
--- a/chromium/v8/src/wasm/wasm-linkage.h
+++ b/chromium/v8/src/wasm/wasm-linkage.h
@@ -80,6 +80,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == LOONG64 ================================================================
+// ===========================================================================
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
+constexpr Register kGpReturnRegisters[] = {a0, a1};
+constexpr DoubleRegister kFpParamRegisters[] = {f0, f1, f2, f3, f4, f5, f6, f7};
+constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1};
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc
index 2bf20ea3ec3..9bb34721388 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.cc
+++ b/chromium/v8/src/wasm/wasm-module-builder.cc
@@ -264,7 +264,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
functions_(zone),
tables_(zone),
data_segments_(zone),
- indirect_functions_(zone),
+ element_segments_(zone),
globals_(zone),
exceptions_(zone),
signature_map_(zone),
@@ -290,15 +290,20 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
}
-uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
- auto sig_entry = signature_map_.find(*sig);
- if (sig_entry != signature_map_.end()) return sig_entry->second;
+uint32_t WasmModuleBuilder::ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- types_.push_back(Type(sig));
+ types_.push_back(Type(sig, supertype));
return index;
}
+uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig, uint32_t supertype) {
+ auto sig_entry = signature_map_.find(*sig);
+ if (sig_entry != signature_map_.end()) return sig_entry->second;
+ return ForceAddSignature(sig, supertype);
+}
+
uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
DCHECK_EQ(0, type->return_count());
int type_index = AddSignature(type);
@@ -307,15 +312,16 @@ uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
return except_index;
}
-uint32_t WasmModuleBuilder::AddStructType(StructType* type) {
+uint32_t WasmModuleBuilder::AddStructType(StructType* type,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
-uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
+uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
@@ -323,75 +329,52 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
const uint32_t WasmModuleBuilder::kNullIndex =
std::numeric_limits<uint32_t>::max();
-// TODO(9495): Add support for typed function tables and more init. expressions.
-uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
- DCHECK(allocating_indirect_functions_allowed_);
- uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
- DCHECK_GE(FLAG_wasm_max_table_size, index);
- if (count > FLAG_wasm_max_table_size - index) {
+uint32_t WasmModuleBuilder::IncreaseTableMinSize(uint32_t table_index,
+ uint32_t count) {
+ DCHECK_LT(table_index, tables_.size());
+ uint32_t old_min_size = tables_[table_index].min_size;
+ if (count > FLAG_wasm_max_table_size - old_min_size) {
return std::numeric_limits<uint32_t>::max();
}
- uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
- DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, kNullIndex);
- uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
- if (tables_.empty()) {
- // This cannot use {AddTable} because that would flip the
- // {allocating_indirect_functions_allowed_} flag.
- tables_.push_back({kWasmFuncRef, new_size, max, true, {}});
- } else {
- // There can only be the indirect function table so far, otherwise the
- // {allocating_indirect_functions_allowed_} flag would have been false.
- DCHECK_EQ(1u, tables_.size());
- DCHECK_EQ(kWasmFuncRef, tables_[0].type);
- DCHECK(tables_[0].has_maximum);
- tables_[0].min_size = new_size;
- tables_[0].max_size = max;
- }
- return index;
-}
-
-void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
- uint32_t direct) {
- indirect_functions_[indirect] = direct;
-}
-
-void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
- DCHECK_GE(FLAG_wasm_max_table_size, max);
- DCHECK_GE(max, indirect_functions_.size());
- max_table_size_ = max;
- DCHECK(allocating_indirect_functions_allowed_);
- if (!tables_.empty()) {
- tables_[0].max_size = max;
- }
+ tables_[table_index].min_size = old_min_size + count;
+ tables_[table_index].max_size =
+ std::max(old_min_size + count, tables_[table_index].max_size);
+ return old_min_size;
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, 0, false, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size, WasmInitExpr init) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, std::move(init)});
return static_cast<uint32_t>(tables_.size() - 1);
}
+void WasmModuleBuilder::AddElementSegment(WasmElemSegment segment) {
+ element_segments_.push_back(std::move(segment));
+}
+
+void WasmModuleBuilder::SetIndirectFunction(
+ uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode) {
+ WasmElemSegment segment(zone_, kWasmFuncRef, table_index,
+ WasmInitExpr(static_cast<int>(index_in_table)));
+ segment.indexing_mode = indexing_mode;
+ segment.entries.emplace_back(WasmElemSegment::Entry::kRefFuncEntry,
+ direct_function_index);
+ AddElementSegment(std::move(segment));
+}
+
uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
FunctionSig* sig,
base::Vector<const char> module) {
@@ -454,8 +437,9 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
-void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
- ValueType type) {
+void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
+ const WasmInitExpr& init,
+ ValueType type) {
switch (init.kind()) {
case WasmInitExpr::kI32Const:
buffer->write_u8(kExprI32Const);
@@ -531,22 +515,49 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
break;
}
+ case WasmInitExpr::kStructNew:
case WasmInitExpr::kStructNewWithRtt:
+ case WasmInitExpr::kStructNewDefault:
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ STATIC_ASSERT((kExprStructNew >> 8) == kGCPrefix);
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefault >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefaultWithRtt >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
+ WasmOpcode opcode;
+ switch (init.kind()) {
+ case WasmInitExpr::kStructNewWithRtt:
+ opcode = kExprStructNewWithRtt;
+ break;
+ case WasmInitExpr::kStructNew:
+ opcode = kExprStructNew;
+ break;
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ opcode = kExprStructNewDefaultWithRtt;
+ break;
+ case WasmInitExpr::kStructNewDefault:
+ opcode = kExprStructNewDefault;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ buffer->write_u8(static_cast<uint8_t>(opcode));
buffer->write_u32v(init.immediate().index);
break;
case WasmInitExpr::kArrayInit:
+ case WasmInitExpr::kArrayInitStatic:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprArrayInitStatic >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
+ buffer->write_u8(static_cast<uint8_t>(
+ init.kind() == WasmInitExpr::kArrayInit ? kExprArrayInit
+ : kExprArrayInitStatic));
buffer->write_u32v(init.immediate().index);
buffer->write_u32v(static_cast<uint32_t>(init.operands().size() - 1));
break;
@@ -559,7 +570,8 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kRttSub:
case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub must be emitted first.
- WriteInitializerExpression(buffer, init.operands()[0], kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, init.operands()[0],
+ kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
@@ -571,6 +583,11 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
}
+void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
+ ValueType type) {
+ WriteInitializerExpressionWithEnd(buffer, init, type);
+ buffer->write_u8(kExprEnd);
+}
} // namespace
void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
@@ -584,10 +601,12 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_size(types_.size());
for (const Type& type : types_) {
+ bool has_super = type.supertype != kNoSuperType;
switch (type.kind) {
case Type::kFunctionSig: {
FunctionSig* sig = type.sig;
- buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_u8(has_super ? kWasmFunctionSubtypeCode
+ : kWasmFunctionTypeCode);
buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
WriteValueType(buffer, param);
@@ -596,23 +615,40 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
for (auto ret : sig->returns()) {
WriteValueType(buffer, ret);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kFuncRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kStructType: {
StructType* struct_type = type.struct_type;
- buffer->write_u8(kWasmStructTypeCode);
+ buffer->write_u8(has_super ? kWasmStructSubtypeCode
+ : kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
for (uint32_t i = 0; i < struct_type->field_count(); i++) {
WriteValueType(buffer, struct_type->field(i));
buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kArrayType: {
ArrayType* array_type = type.array_type;
- buffer->write_u8(kWasmArrayTypeCode);
+ buffer->write_u8(has_super ? kWasmArraySubtypeCode
+ : kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
buffer->write_u8(array_type->mutability() ? 1 : 0);
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
}
@@ -705,7 +741,6 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
WriteInitializerExpression(buffer, global.init, global.type);
- buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
@@ -744,31 +779,67 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
FixupSection(buffer, start);
}
- // == emit function table elements ===========================================
- if (indirect_functions_.size() > 0) {
+ // == emit element segments ==================================================
+ if (element_segments_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer->write_u8(1); // count of entries
- buffer->write_u8(0); // table index
- uint32_t first_element = 0;
- while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == kNullIndex) {
- first_element++;
- }
- uint32_t last_element =
- static_cast<uint32_t>(indirect_functions_.size() - 1);
- while (last_element >= first_element &&
- indirect_functions_[last_element] == kNullIndex) {
- last_element--;
- }
- buffer->write_u8(kExprI32Const); // offset
- buffer->write_u32v(first_element);
- buffer->write_u8(kExprEnd);
- uint32_t element_count = last_element - first_element + 1;
- buffer->write_size(element_count);
- for (uint32_t i = first_element; i <= last_element; i++) {
- buffer->write_size(indirect_functions_[i] + function_imports_.size());
+ buffer->write_size(element_segments_.size());
+ for (const WasmElemSegment& segment : element_segments_) {
+ bool is_active = segment.status == WasmElemSegment::kStatusActive;
+ // If this segment is expressible in the backwards-compatible syntax
+ // (before reftypes proposal), we should emit it in that syntax.
+ // This is the case if the segment is active and all entries are function
+ // references. Note that this is currently the only path that allows
+ // kRelativeToImports function indexing mode.
+ // TODO(manoskouk): Remove this logic once reftypes has shipped.
+ bool backwards_compatible =
+ is_active && segment.table_index == 0 &&
+ std::all_of(
+ segment.entries.begin(), segment.entries.end(), [](auto& entry) {
+ return entry.kind ==
+ WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry;
+ });
+ if (backwards_compatible) {
+ buffer->write_u8(0);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ buffer->write_u32v(
+ segment.indexing_mode == WasmElemSegment::kRelativeToImports
+ ? entry.index
+ : entry.index +
+ static_cast<uint32_t>(function_imports_.size()));
+ }
+ } else {
+ DCHECK_EQ(segment.indexing_mode, WasmElemSegment::kRelativeToImports);
+ // If we pick the general syntax, we always explicitly emit the table
+ // index and the type, and use the expressions-as-elements syntax. I.e.
+ // the initial byte is one of 0x05, 0x06, and 0x07.
+ uint8_t kind_mask =
+ segment.status == WasmElemSegment::kStatusActive
+ ? 0b10
+ : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
+ : 0b01;
+ uint8_t expressions_as_elements_mask = 0b100;
+ buffer->write_u8(kind_mask | expressions_as_elements_mask);
+ if (is_active) {
+ buffer->write_u32v(segment.table_index);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ }
+ WriteValueType(buffer, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ uint8_t opcode =
+ entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
+ ? kExprGlobalGet
+ : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
+ ? kExprRefFunc
+ : kExprRefNull;
+ buffer->write_u8(opcode);
+ buffer->write_u32v(entry.index);
+ buffer->write_u8(kExprEnd);
+ }
+ }
}
-
FixupSection(buffer, start);
}
@@ -833,7 +904,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit the section string.
buffer->write_string(base::CStrVector("name"));
// Emit a subsection for the function names.
- buffer->write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunctionCode);
// Emit a placeholder for the subsection length.
size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
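The general (non-backwards-compatible) element-segment path above composes its leading flag byte from a two-bit status and the expressions-as-elements bit, giving the 0x05/0x06/0x07 values mentioned in the comment. A minimal sketch reproducing just that byte, with sketch-local names:

#include <cassert>
#include <cstdint>

enum Status { kStatusActive, kStatusPassive, kStatusDeclarative };

// Mirrors the mask computation in WriteTo(): bits 0-1 encode the segment
// status, bit 2 selects the expressions-as-elements encoding.
uint8_t ElementSegmentFlags(Status status) {
  uint8_t kind_mask = status == kStatusActive        ? 0b10
                      : status == kStatusDeclarative ? 0b11
                                                     : 0b01;
  constexpr uint8_t kExpressionsAsElements = 0b100;
  return kind_mask | kExpressionsAsElements;
}

int main() {
  assert(ElementSegmentFlags(kStatusPassive) == 0x05);
  assert(ElementSegmentFlags(kStatusActive) == 0x06);
  assert(ElementSegmentFlags(kStatusDeclarative) == 0x07);
  return 0;
}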
diff --git a/chromium/v8/src/wasm/wasm-module-builder.h b/chromium/v8/src/wasm/wasm-module-builder.h
index db2091cdba8..7ba140775d8 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.h
+++ b/chromium/v8/src/wasm/wasm-module-builder.h
@@ -207,6 +207,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
+ uint32_t sig_index() { return signature_index_; }
inline FunctionSig* signature();
private:
@@ -245,6 +246,68 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmModuleBuilder(const WasmModuleBuilder&) = delete;
WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
+ // Static representation of wasm element segment (table initializer). This is
+ // different than the version in wasm-module.h.
+ class WasmElemSegment {
+ public:
+ // asm.js gives function indices starting with the first non-imported
+ // function.
+ enum FunctionIndexingMode {
+ kRelativeToImports,
+ kRelativeToDeclaredFunctions
+ };
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ };
+ struct Entry {
+ enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
+ uint32_t index;
+ Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
+ Entry() : kind(kRefNullEntry), index(0) {}
+ };
+
+ // Construct an active segment.
+ WasmElemSegment(Zone* zone, ValueType type, uint32_t table_index,
+ WasmInitExpr offset)
+ : type(type),
+ table_index(table_index),
+ offset(std::move(offset)),
+ entries(zone),
+ status(kStatusActive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ // Construct a passive or declarative segment, which has no table
+ // index or offset.
+ WasmElemSegment(Zone* zone, ValueType type, bool declarative)
+ : type(type),
+ table_index(0),
+ entries(zone),
+ status(declarative ? kStatusDeclarative : kStatusPassive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
+
+ ValueType type;
+ uint32_t table_index;
+ WasmInitExpr offset;
+ FunctionIndexingMode indexing_mode = kRelativeToImports;
+ ZoneVector<Entry> entries;
+ Status status;
+
+ private:
+ // This ensures no {WasmInitExpr} with subexpressions is used, which would
+ // cause a memory leak because those are stored in an std::vector. Such an
+ // offset would also be mistyped.
+ bool IsValidOffsetKind(WasmInitExpr::Operator kind) {
+ return kind == WasmInitExpr::kI32Const ||
+ kind == WasmInitExpr::kGlobalGet;
+ }
+ };
+
// Building methods.
uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
base::Vector<const char> module = {});
@@ -255,16 +318,27 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
bool mutability,
base::Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
- uint32_t AddSignature(FunctionSig* sig);
+ // Add an element segment to this {WasmModuleBuilder}. {segment}'s entries
+ // have to be initialized.
+ void AddElementSegment(WasmElemSegment segment);
+ // Helper method to create an active segment with one function. Assumes that
+ // table segment at {table_index} is typed as funcref.
+ void SetIndirectFunction(uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode);
+ // Increase the starting size of the table at {table_index} by {count}. Also
+ // increases the maximum table size if needed. Returns the former starting
+ // size, or the maximum uint32_t value if the maximum table size has been
+ // exceeded.
+ uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
+ // Adds the signature to the module if it does not already exist.
+ uint32_t AddSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType);
+ // Does not deduplicate function signatures.
+ uint32_t ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype = kNoSuperType);
uint32_t AddException(FunctionSig* type);
- uint32_t AddStructType(StructType* type);
- uint32_t AddArrayType(ArrayType* type);
- // In the current implementation, it's supported to have uninitialized slots
- // at the beginning and/or end of the indirect function table, as long as
- // the filled slots form a contiguous block in the middle.
- uint32_t AllocateIndirectFunctions(uint32_t count);
- void SetIndirectFunction(uint32_t indirect, uint32_t direct);
- void SetMaxTableSize(uint32_t max);
+ uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType);
+ uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
@@ -288,10 +362,17 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
+ ValueType GetTableType(uint32_t index) { return tables_[index].type; }
+
+ bool IsSignature(uint32_t index) {
+ return types_[index].kind == Type::kFunctionSig;
+ }
+
FunctionSig* GetSignature(uint32_t index) {
DCHECK(types_[index].kind == Type::kFunctionSig);
return types_[index].sig;
}
+
bool IsStructType(uint32_t index) {
return types_[index].kind == Type::kStructType;
}
@@ -304,10 +385,15 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
}
ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
+ WasmFunctionBuilder* GetFunction(uint32_t index) { return functions_[index]; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
int NumTypes() { return static_cast<int>(types_.size()); }
+ int NumTables() { return static_cast<int>(tables_.size()); }
+
+ int NumFunctions() { return static_cast<int>(functions_.size()); }
+
FunctionSig* GetExceptionType(int index) {
return types_[exceptions_[index]].sig;
}
@@ -317,13 +403,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
- explicit Type(FunctionSig* signature)
- : kind(kFunctionSig), sig(signature) {}
- explicit Type(StructType* struct_type)
- : kind(kStructType), struct_type(struct_type) {}
- explicit Type(ArrayType* array_type)
- : kind(kArrayType), array_type(array_type) {}
+ explicit Type(FunctionSig* signature, uint32_t supertype)
+ : kind(kFunctionSig), supertype(supertype), sig(signature) {}
+ explicit Type(StructType* struct_type, uint32_t supertype)
+ : kind(kStructType), supertype(supertype), struct_type(struct_type) {}
+ explicit Type(ArrayType* array_type, uint32_t supertype)
+ : kind(kArrayType), supertype(supertype), array_type(array_type) {}
Kind kind;
+ uint32_t supertype;
union {
FunctionSig* sig;
StructType* struct_type;
@@ -380,12 +467,11 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmTable> tables_;
ZoneVector<WasmDataSegment> data_segments_;
- ZoneVector<uint32_t> indirect_functions_;
+ ZoneVector<WasmElemSegment> element_segments_;
ZoneVector<WasmGlobal> globals_;
ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
- uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
@@ -393,8 +479,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#if DEBUG
// Once AddExportedImport is called, no more imports can be added.
bool adding_imports_allowed_ = true;
- // Indirect functions must be allocated before adding extra tables.
- bool allocating_indirect_functions_allowed_ = true;
#endif
};
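The builder now exposes both a deduplicating and a forced signature-adding path (AddSignature vs. ForceAddSignature). A simplified sketch of that split, keyed on a plain string instead of a FunctionSig and ignoring the new supertype parameter:

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Simplified stand-in for the builder's type table: AddSignature reuses an
// existing index for an identical signature, ForceAddSignature always appends.
struct MiniBuilder {
  std::vector<std::string> types;
  std::unordered_map<std::string, uint32_t> signature_map;

  uint32_t ForceAddSignature(const std::string& sig) {
    uint32_t index = static_cast<uint32_t>(types.size());
    signature_map.emplace(sig, index);
    types.push_back(sig);
    return index;
  }
  uint32_t AddSignature(const std::string& sig) {
    auto it = signature_map.find(sig);
    if (it != signature_map.end()) return it->second;
    return ForceAddSignature(sig);
  }
};

int main() {
  MiniBuilder b;
  assert(b.AddSignature("i32->i32") == 0);
  assert(b.AddSignature("i32->i32") == 0);       // deduplicated
  assert(b.ForceAddSignature("i32->i32") == 1);  // forced new entry
  return 0;
}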
diff --git a/chromium/v8/src/wasm/wasm-module-sourcemap.cc b/chromium/v8/src/wasm/wasm-module-sourcemap.cc
index 85a171e5acb..ea03dae8e2f 100644
--- a/chromium/v8/src/wasm/wasm-module-sourcemap.cc
+++ b/chromium/v8/src/wasm/wasm-module-sourcemap.cc
@@ -6,11 +6,18 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
#include "src/api/api.h"
#include "src/base/vlq-base64.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
diff --git a/chromium/v8/src/wasm/wasm-module-sourcemap.h b/chromium/v8/src/wasm/wasm-module-sourcemap.h
index fd8c1117fa7..38c0358f90c 100644
--- a/chromium/v8/src/wasm/wasm-module-sourcemap.h
+++ b/chromium/v8/src/wasm/wasm-module-sourcemap.h
@@ -12,10 +12,13 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
// The class is for decoding and managing source map generated by a WebAssembly
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index 97a31487ea9..0035c00bf2f 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -113,6 +113,23 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
return func_index;
}
+// TODO(7748): Measure whether this iterative implementation is fast enough.
+// We could cache the result on the module, in yet another vector indexed by
+// type index.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index) {
+ uint32_t starting_point = type_index;
+ int depth = 0;
+ while ((type_index = module->supertype(type_index)) != kGenericSuperType) {
+ if (type_index == starting_point) return -1; // Cycle detected.
+ // This is disallowed and will be rejected by validation, but might occur
+ // when this function is called.
+ if (type_index == kNoSuperType) break;
+ depth++;
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) break;
+ }
+ return depth;
+}
+
void LazilyGeneratedNames::AddForTesting(int function_index,
WireBytesRef name) {
base::MutexGuard lock(&mutex_);
@@ -293,19 +310,23 @@ Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
}
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size) {
+ base::Optional<uint32_t> max_size,
+ bool shared) {
Factory* factory = isolate->factory();
Handle<JSFunction> object_function = isolate->object_function();
Handle<JSObject> object = factory->NewJSObject(object_function);
Handle<String> minimum_string = factory->InternalizeUtf8String("minimum");
Handle<String> maximum_string = factory->InternalizeUtf8String("maximum");
+ Handle<String> shared_string = factory->InternalizeUtf8String("shared");
JSObject::AddProperty(isolate, object, minimum_string,
factory->NewNumberFromUint(min_size), NONE);
if (max_size.has_value()) {
JSObject::AddProperty(isolate, object, maximum_string,
factory->NewNumberFromUint(max_size.value()), NONE);
}
+ JSObject::AddProperty(isolate, object, shared_string,
+ factory->ToBoolean(shared), NONE);
return object;
}
@@ -401,7 +422,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
import_kind = memory_string;
break;
@@ -498,7 +520,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
export_kind = memory_string;
break;
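A self-contained sketch of the GetSubtypingDepth walk added above, using a plain vector in place of the module's supertypes table; the sentinel values and the depth cap mirror kGenericSuperType, kNoSuperType, and kV8MaxRttSubtypingDepth from this patch:

#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
constexpr uint32_t kNoSuperType = 0xFFFFFFFF;
constexpr int kMaxDepth = 31;

int GetSubtypingDepth(const std::vector<uint32_t>& supertypes,
                      uint32_t type_index) {
  uint32_t start = type_index;
  int depth = 0;
  while ((type_index = supertypes[type_index]) != kGenericSuperType) {
    if (type_index == start) return -1;  // cycle detected
    if (type_index == kNoSuperType) break;
    depth++;
    if (depth > kMaxDepth) break;
  }
  return depth;
}

int main() {
  // Types 0 <- 1 <- 2 form a chain; type 3 has no explicit supertype.
  std::vector<uint32_t> supertypes = {kGenericSuperType, 0, 1, kNoSuperType};
  assert(GetSubtypingDepth(supertypes, 0) == 0);
  assert(GetSubtypingDepth(supertypes, 2) == 2);
  assert(GetSubtypingDepth(supertypes, 3) == 0);
  return 0;
}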
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index d1f874a9085..08a88c4a8e6 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -259,6 +259,11 @@ struct V8_EXPORT_PRIVATE WasmDebugSymbols {
struct WasmTable;
+// End of a chain of explicit supertypes.
+constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
+// Used for types that have no explicit supertype.
+constexpr uint32_t kNoSuperType = 0xFFFFFFFF;
+
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<Zone> signature_zone;
@@ -288,6 +293,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
WireBytesRef name = {0, 0};
std::vector<TypeDefinition> types; // by type index
std::vector<uint8_t> type_kinds; // by type index
+ std::vector<uint32_t> supertypes; // by type index
// Map from each type index to the index of its corresponding canonical type.
// Note: right now, only functions are canonicalized, and arrays and structs
// map to themselves.
@@ -295,9 +301,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_type(uint32_t index) const { return index < types.size(); }
- void add_signature(const FunctionSig* sig) {
+ void add_signature(const FunctionSig* sig, uint32_t supertype) {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
+ supertypes.push_back(supertype);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
canonicalized_type_ids.push_back(canonical_id);
}
@@ -309,9 +316,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].function_sig;
}
- void add_struct_type(const StructType* type) {
+ void add_struct_type(const StructType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for structs.
canonicalized_type_ids.push_back(0);
}
@@ -323,9 +331,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].struct_type;
}
- void add_array_type(const ArrayType* type) {
+ void add_array_type(const ArrayType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for arrays.
canonicalized_type_ids.push_back(0);
}
@@ -337,6 +346,14 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].array_type;
}
+ uint32_t supertype(uint32_t index) const {
+ DCHECK(index < supertypes.size());
+ return supertypes[index];
+ }
+ bool has_supertype(uint32_t index) const {
+ return supertype(index) != kNoSuperType;
+ }
+
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -418,6 +435,12 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset);
// contained within a function.
int GetNearestWasmFunction(const WasmModule* module, uint32_t byte_offset);
+// Gets the explicitly defined subtyping depth for the given type.
+// Returns 0 if the type has no explicit supertype.
+// The result is capped to {kV8MaxRttSubtypingDepth + 1}.
+// Invalid cyclic hierarchies will return -1.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -477,7 +500,8 @@ Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig,
Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
ValueType type);
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size);
+ base::Optional<uint32_t> max_size,
+ bool shared);
Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
uint32_t min_size,
base::Optional<uint32_t> max_size);
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index a75d83df027..be6d7dd6f70 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -186,7 +186,6 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
@@ -559,11 +558,26 @@ int WasmStruct::Size(const wasm::StructType* type) {
Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
}
-int WasmStruct::GcSafeSize(Map map) {
- wasm::StructType* type = GcSafeType(map);
- return Size(type);
+// static
+void WasmStruct::EncodeInstanceSizeInMap(int instance_size, Map map) {
+ // WasmStructs can be bigger than the {map.instance_size_in_words} field
+ // can describe; yet we have to store the instance size somewhere on the
+ // map so that the GC can read it without relying on any other objects
+ // still being around. To solve this problem, we store the instance size
+ // in two other fields that are otherwise unused for WasmStructs.
+ STATIC_ASSERT(0xFFFF - kHeaderSize >
+ wasm::kMaxValueTypeSize * wasm::kV8MaxWasmStructFields);
+ map.SetWasmByte1(instance_size & 0xFF);
+ map.SetWasmByte2(instance_size >> 8);
+}
+
+// static
+int WasmStruct::DecodeInstanceSizeFromMap(Map map) {
+ return (map.WasmByte2() << 8) | map.WasmByte1();
}
+int WasmStruct::GcSafeSize(Map map) { return DecodeInstanceSizeFromMap(map); }
+
wasm::StructType* WasmStruct::type() const { return type(map()); }
Address WasmStruct::RawFieldAddress(int raw_offset) {
@@ -614,13 +628,24 @@ wasm::ArrayType* WasmArray::GcSafeType(Map map) {
wasm::ArrayType* WasmArray::type() const { return type(map()); }
int WasmArray::SizeFor(Map map, int length) {
- int element_size = type(map)->element_type().element_size_bytes();
+ int element_size = DecodeElementSizeFromMap(map);
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
-int WasmArray::GcSafeSizeFor(Map map, int length) {
- int element_size = GcSafeType(map)->element_type().element_size_bytes();
- return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
+uint32_t WasmArray::element_offset(uint32_t index) {
+ DCHECK_LE(index, length());
+ return WasmArray::kHeaderSize +
+ index * type()->element_type().element_size_bytes();
+}
+
+Address WasmArray::ElementAddress(uint32_t index) {
+ return ptr() + element_offset(index) - kHeapObjectTag;
+}
+
+ObjectSlot WasmArray::ElementSlot(uint32_t index) {
+ DCHECK_LE(index, length());
+ DCHECK(type()->element_type().is_reference());
+ return RawField(kHeaderSize + kTaggedSize * index);
}
// static
@@ -630,11 +655,18 @@ Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
return isolate->factory()->undefined_value();
}
wasm::ValueType element_type = array->type()->element_type();
- uint32_t offset =
- WasmArray::kHeaderSize + index * element_type.element_size_bytes();
- return ReadValueAt(isolate, array, element_type, offset);
+ return ReadValueAt(isolate, array, element_type,
+ array->element_offset(index));
}
+// static
+void WasmArray::EncodeElementSizeInMap(int element_size, Map map) {
+ map.SetWasmByte1(element_size);
+}
+
+// static
+int WasmArray::DecodeElementSizeFromMap(Map map) { return map.WasmByte1(); }
+
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
// Due to the type-specific pointer tags for external pointers, we need to
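A round-trip sketch of the two-spare-byte instance-size encoding used for WasmStruct maps above; FakeMap is a stand-in for the real Map and its SetWasmByte1/SetWasmByte2/WasmByte1/WasmByte2 accessors:

#include <cassert>
#include <cstdint>

struct FakeMap {
  uint8_t wasm_byte1 = 0;
  uint8_t wasm_byte2 = 0;
};

void EncodeInstanceSize(int instance_size, FakeMap& map) {
  map.wasm_byte1 = instance_size & 0xFF;  // low 8 bits
  map.wasm_byte2 = instance_size >> 8;    // high bits; size fits in 16 bits
}

int DecodeInstanceSize(const FakeMap& map) {
  return (map.wasm_byte2 << 8) | map.wasm_byte1;
}

int main() {
  FakeMap map;
  EncodeInstanceSize(0x1234, map);
  assert(DecodeInstanceSize(map) == 0x1234);
  return 0;
}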
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index a6ff80f6242..8112221c28f 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -12,11 +12,13 @@
#include "src/debug/debug-interface.h"
#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -250,7 +252,7 @@ base::Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
Handle<WasmTableObject> WasmTableObject::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::ValueType type,
uint32_t initial, bool has_maximum, uint32_t maximum,
- Handle<FixedArray>* entries) {
+ Handle<FixedArray>* entries, Handle<Object> initial_value) {
// TODO(7748): Make this work with other types when spec clears up.
{
const WasmModule* module =
@@ -259,9 +261,8 @@ Handle<WasmTableObject> WasmTableObject::New(
}
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
- Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
- backing_store->set(i, null);
+ backing_store->set(i, *initial_value);
}
Handle<Object> max;
@@ -1242,21 +1243,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
CHECK_LE(mem_size, wasm::max_mem_bytes());
#if V8_HOST_ARCH_64_BIT
- uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask64);
#else
// Must handle memory > 2GiB specially.
CHECK_LE(mem_size, size_t{kMaxUInt32});
- uint32_t mem_mask32 =
- (mem_size > 2 * size_t{GB})
- ? 0xFFFFFFFFu
- : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) -
- 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask32);
#endif
}
@@ -1540,7 +1533,8 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (sig_id >= 0) {
wasm::NativeModule* native_module =
instance->module_object().native_module();
- // TODO(wasm): Cache and reuse wrapper code.
+ // TODO(wasm): Cache and reuse wrapper code, to avoid repeated compilation
+ // and permissions switching.
const wasm::WasmFeatures enabled = native_module->enabled_features();
auto resolved = compiler::ResolveWasmImportCall(
callable, sig, instance->module(), enabled);
@@ -1553,10 +1547,11 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (kind == compiler::WasmImportCallKind ::kJSFunctionArityMismatch) {
expected_arity = Handle<JSFunction>::cast(callable)
->shared()
- .internal_formal_parameter_count();
+ .internal_formal_parameter_count_without_receiver();
}
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, false, expected_arity);
+ wasm::CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1693,18 +1688,6 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
}
}
-ObjectSlot WasmArray::ElementSlot(uint32_t index) {
- DCHECK_LE(index, length());
- DCHECK(type()->element_type().is_reference());
- return RawField(kHeaderSize + kTaggedSize * index);
-}
-
-Address WasmArray::ElementAddress(uint32_t index) {
- DCHECK_LE(index, length());
- return ptr() + WasmArray::kHeaderSize +
- index * type()->element_type().element_size_bytes() - kHeapObjectTag;
-}
-
// static
Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
const wasm::FunctionSig* sig,
@@ -2030,7 +2013,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
// method. This does not apply to functions exported from asm.js however.
DCHECK_EQ(is_asm_js_module, js_function->IsConstructor());
shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
+ shared->set_internal_formal_parameter_count(JSParameterCount(arity));
shared->set_script(instance->module_object().script());
return Handle<WasmExportedFunction>::cast(js_function);
}
@@ -2115,7 +2098,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
CK kind = compiler::kDefaultImportCallKind;
if (callable->IsJSFunction()) {
SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
if (expected_arity != parameter_count) {
kind = CK::kJSFunctionArityMismatch;
}
@@ -2143,7 +2127,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Factory::JSFunctionBuilder{isolate, shared, context}
.set_map(function_map)
.Build();
- js_function->shared().set_internal_formal_parameter_count(parameter_count);
+ js_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(parameter_count));
return Handle<WasmJSFunction>::cast(js_function);
}
@@ -2217,10 +2202,6 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
-static_assert(wasm::kV8MaxWasmArrayLength <=
- (Smi::kMaxValue - WasmArray::kHeaderSize) / kDoubleSize,
- "max Wasm array size must fit into max object size");
-
namespace wasm {
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 11d5c265ed5..3c554575f1f 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -182,9 +182,6 @@ class WasmModuleObject
class WasmTableObject
: public TorqueGeneratedWasmTableObject<WasmTableObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTableObject)
-
inline wasm::ValueType type();
V8_EXPORT_PRIVATE static int Grow(Isolate* isolate,
@@ -194,7 +191,8 @@ class WasmTableObject
V8_EXPORT_PRIVATE static Handle<WasmTableObject> New(
Isolate* isolate, Handle<WasmInstanceObject> instance,
wasm::ValueType type, uint32_t initial, bool has_maximum,
- uint32_t maximum, Handle<FixedArray>* entries);
+ uint32_t maximum, Handle<FixedArray>* entries,
+ Handle<Object> initial_value);
V8_EXPORT_PRIVATE static void AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table,
@@ -266,9 +264,6 @@ class WasmMemoryObject
public:
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
- // Dispatched behavior.
- DECL_PRINTER(WasmMemoryObject)
-
// Add an instance to the internal (weak) list.
V8_EXPORT_PRIVATE static void AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
@@ -356,7 +351,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
- DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
@@ -397,7 +391,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kMemoryStartOffset, kSystemPointerSize) \
V(kMemorySizeOffset, kSizetSize) \
- V(kMemoryMaskOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
@@ -555,9 +548,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
class WasmTagObject
: public TorqueGeneratedWasmTagObject<WasmTagObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTagObject)
-
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this tag object.
bool MatchesSignature(const wasm::FunctionSig* sig);
@@ -842,8 +832,6 @@ class WasmExceptionTag
V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
int index);
- DECL_PRINTER(WasmExceptionTag)
-
TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
};
@@ -903,6 +891,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, WasmObject> {
static inline wasm::StructType* GcSafeType(Map map);
static inline int Size(const wasm::StructType* type);
static inline int GcSafeSize(Map map);
+ static inline void EncodeInstanceSizeInMap(int instance_size, Map map);
+ static inline int DecodeInstanceSizeFromMap(Map map);
// Returns the address of the field at given offset.
inline Address RawFieldAddress(int raw_offset);
@@ -935,19 +925,30 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
// Get the {ObjectSlot} corresponding to the element at {index}. Requires that
// this is a reference array.
- ObjectSlot ElementSlot(uint32_t index);
- wasm::WasmValue GetElement(uint32_t index);
+ inline ObjectSlot ElementSlot(uint32_t index);
+ V8_EXPORT_PRIVATE wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
- static inline int GcSafeSizeFor(Map map, int length);
// Returns boxed value of the array's element.
static inline Handle<Object> GetElement(Isolate* isolate,
Handle<WasmArray> array,
uint32_t index);
- // Returns the Address of the element at {index}.
- Address ElementAddress(uint32_t index);
+ // Returns the offset/Address of the element at {index}.
+ inline uint32_t element_offset(uint32_t index);
+ inline Address ElementAddress(uint32_t index);
+
+ static int MaxLength(const wasm::ArrayType* type) {
+ // The total object size must fit into a Smi, for filler objects. To make
+ // the behavior of Wasm programs independent from the Smi configuration,
+ // we hard-code the smaller of the two supported ranges.
+ int element_shift = type->element_type().element_size_log2();
+ return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) >> element_shift;
+ }
+
+ static inline void EncodeElementSizeInMap(int element_size, Map map);
+ static inline int DecodeElementSizeFromMap(Map map);
DECL_PRINTER(WasmArray)
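A worked instance of the MaxLength bound above; the 31-bit Smi maximum is the smaller supported range the comment refers to, and the header size here is an assumed placeholder for WasmArray::kHeaderSize:

#include <cassert>
#include <cstdint>

constexpr int32_t kSmiMaxValue31 = (1 << 30) - 1;  // 1073741823
constexpr int32_t kAssumedHeaderSize = 16;         // illustrative only

constexpr int32_t MaxLength(int element_size_log2) {
  return (kSmiMaxValue31 - kAssumedHeaderSize) >> element_size_log2;
}

int main() {
  assert(MaxLength(0) == 1073741807);  // 1-byte elements
  assert(MaxLength(3) == 134217725);   // 8-byte elements (e.g. f64)
  return 0;
}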
diff --git a/chromium/v8/src/wasm/wasm-opcodes-inl.h b/chromium/v8/src/wasm/wasm-opcodes-inl.h
index 550d7f4671c..1034b72d917 100644
--- a/chromium/v8/src/wasm/wasm-opcodes-inl.h
+++ b/chromium/v8/src/wasm/wasm-opcodes-inl.h
@@ -382,12 +382,16 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// GC operations.
CASE_OP(StructNewWithRtt, "struct.new_with_rtt")
+ CASE_OP(StructNewDefaultWithRtt, "struct.new_default_with_rtt")
+ CASE_OP(StructNew, "struct.new")
CASE_OP(StructNewDefault, "struct.new_default")
CASE_OP(StructGet, "struct.get")
CASE_OP(StructGetS, "struct.get_s")
CASE_OP(StructGetU, "struct.get_u")
CASE_OP(StructSet, "struct.set")
CASE_OP(ArrayNewWithRtt, "array.new_with_rtt")
+ CASE_OP(ArrayNewDefaultWithRtt, "array.new_default_with_rtt")
+ CASE_OP(ArrayNew, "array.new")
CASE_OP(ArrayNewDefault, "array.new_default")
CASE_OP(ArrayGet, "array.get")
CASE_OP(ArrayGetS, "array.get_s")
@@ -396,6 +400,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ArrayLen, "array.len")
CASE_OP(ArrayCopy, "array.copy")
CASE_OP(ArrayInit, "array.init")
+ CASE_OP(ArrayInitStatic, "array.init_static")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
@@ -403,9 +408,13 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RttSub, "rtt.sub")
CASE_OP(RttFreshSub, "rtt.fresh_sub")
CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefTestStatic, "ref.test_static")
CASE_OP(RefCast, "ref.cast")
+ CASE_OP(RefCastStatic, "ref.cast_static")
CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(BrOnCastStatic, "br_on_cast_static")
CASE_OP(BrOnCastFail, "br_on_cast_fail")
+ CASE_OP(BrOnCastStaticFail, "br_on_cast_static_fail")
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 50e813ad024..d920b7660b0 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -650,13 +650,15 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
#define FOREACH_GC_OPCODE(V) \
V(StructNewWithRtt, 0xfb01, _) \
- V(StructNewDefault, 0xfb02, _) \
+ V(StructNewDefaultWithRtt, 0xfb02, _) \
V(StructGet, 0xfb03, _) \
V(StructGetS, 0xfb04, _) \
V(StructGetU, 0xfb05, _) \
V(StructSet, 0xfb06, _) \
+ V(StructNew, 0xfb07, _) \
+ V(StructNewDefault, 0xfb08, _) \
V(ArrayNewWithRtt, 0xfb11, _) \
- V(ArrayNewDefault, 0xfb12, _) \
+ V(ArrayNewDefaultWithRtt, 0xfb12, _) \
V(ArrayGet, 0xfb13, _) \
V(ArrayGetS, 0xfb14, _) \
V(ArrayGetU, 0xfb15, _) \
@@ -664,6 +666,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(ArrayLen, 0xfb17, _) \
V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
+ V(ArrayInitStatic, 0xfb1a, _) \
+ V(ArrayNew, 0xfb1b, _) \
+ V(ArrayNewDefault, 0xfb1c, _) \
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
@@ -674,6 +679,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefCast, 0xfb41, _) \
V(BrOnCast, 0xfb42, _) \
V(BrOnCastFail, 0xfb43, _) \
+ V(RefTestStatic, 0xfb44, _) \
+ V(RefCastStatic, 0xfb45, _) \
+ V(BrOnCastStatic, 0xfb46, _) \
+ V(BrOnCastStaticFail, 0xfb47, _) \
V(RefIsFunc, 0xfb50, _) \
V(RefIsData, 0xfb51, _) \
V(RefIsI31, 0xfb52, _) \
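A small check of the prefix/sub-opcode split that the STATIC_ASSERTs in the module builder rely on for the GC opcodes listed above; the two opcode values come from this table, everything else is sketch-local:

#include <cassert>
#include <cstdint>

constexpr uint8_t kGCPrefix = 0xfb;
constexpr uint16_t kExprStructNew = 0xfb07;        // from the table above
constexpr uint16_t kExprArrayInitStatic = 0xfb1a;  // from the table above

static_assert((kExprStructNew >> 8) == kGCPrefix, "prefix byte");
static_assert((kExprArrayInitStatic >> 8) == kGCPrefix, "prefix byte");

int main() {
  // Emitted as the prefix byte followed by the low byte (a single byte
  // suffices here since all listed sub-opcodes are below 0x80).
  uint8_t bytes[2] = {static_cast<uint8_t>(kExprStructNew >> 8),
                      static_cast<uint8_t>(kExprStructNew & 0xff)};
  assert(bytes[0] == 0xfb && bytes[1] == 0x07);
  return 0;
}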
diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc
index d3165582c83..b0d697924e2 100644
--- a/chromium/v8/src/wasm/wasm-serialization.cc
+++ b/chromium/v8/src/wasm/wasm-serialization.cc
@@ -303,7 +303,7 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(bool);
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
if (code->tier() != ExecutionTier::kTurbofan) {
return sizeof(bool);
}
@@ -334,7 +334,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(false);
return true;
}
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
// Only serialize TurboFan code, as Liftoff code can contain breakpoints or
// non-relocatable constants.
if (code->tier() != ExecutionTier::kTurbofan) {
diff --git a/chromium/v8/src/wasm/wasm-subtyping.cc b/chromium/v8/src/wasm/wasm-subtyping.cc
index d2b7e9fe31d..83b1bbe4629 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.cc
+++ b/chromium/v8/src/wasm/wasm-subtyping.cc
@@ -223,6 +223,8 @@ V8_INLINE bool EquivalentIndices(uint32_t index1, uint32_t index2,
}
}
+} // namespace
+
bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -234,8 +236,10 @@ bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < super_struct->field_count(); i++) {
bool sub_mut = sub_struct->mutability(i);
bool super_mut = super_struct->mutability(i);
@@ -261,8 +265,10 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
super_module->types[supertype_index].array_type;
bool sub_mut = sub_array->mutability();
bool super_mut = super_array->mutability();
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
if (sub_mut != super_mut ||
(sub_mut &&
!EquivalentTypes(sub_array->element_type(), super_array->element_type(),
@@ -294,8 +300,10 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
// Contravariance for params.
@@ -318,7 +326,6 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return true;
}
-} // namespace
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
ValueType subtype, ValueType supertype, const WasmModule* sub_module,
@@ -410,11 +417,35 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
DCHECK(super_heap.is_index());
uint32_t super_index = super_heap.ref_index();
DCHECK(super_module->has_type(super_index));
+ // The {IsSubtypeOf} entry point already has a fast path checking ValueType
+ // equality; here we catch (ref $x) being a subtype of (ref null $x).
+ if (sub_module == super_module && sub_index == super_index) return true;
uint8_t sub_kind = sub_module->type_kinds[sub_index];
if (sub_kind != super_module->type_kinds[super_index]) return false;
+ // Types with explicit supertypes just check those.
+ if (sub_module->has_supertype(sub_index)) {
+ // TODO(7748): Figure out cross-module story.
+ if (sub_module != super_module) return false;
+
+ uint32_t explicit_super = sub_module->supertype(sub_index);
+ while (true) {
+ if (explicit_super == super_index) return true;
+ // Reached the end of the explicitly defined inheritance chain.
+ if (explicit_super == kGenericSuperType) return false;
+      // Types without explicit supertype can't occur here; they would have
+      // failed validation.
+ DCHECK_NE(explicit_super, kNoSuperType);
+ explicit_super = sub_module->supertype(explicit_super);
+ }
+ } else {
+ // A structural type (without explicit supertype) is never a subtype of
+ // a nominal type (with explicit supertype).
+ if (super_module->has_supertype(super_index)) return false;
+ }
+
// Accessing the caches for subtyping and equivalence from multiple background
// threads is protected by a lock.
base::RecursiveMutexGuard type_cache_access(
diff --git a/chromium/v8/src/wasm/wasm-subtyping.h b/chromium/v8/src/wasm/wasm-subtyping.h
index 59e7935d1f1..53232ca2c24 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.h
+++ b/chromium/v8/src/wasm/wasm-subtyping.h
@@ -97,6 +97,20 @@ V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
// case another WasmModule gets allocated in the same address later.
void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
+// Checks whether {subtype_index} is a legal subtype of {supertype_index}.
+// These are the same checks that {IsSubtypeOf} uses for comparing types without
+// explicitly given supertypes; for validating such explicit supertypes they
+// can be called directly.
+bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+
} // namespace wasm
} // namespace internal
} // namespace v8
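// Illustrative sketch (hypothetical, not from the patch): how a validator
// might call the newly exported check directly when verifying an explicitly
// declared struct supertype. Only StructIsSubtypeOf and WasmModule come from
// the patch; the helper name and surrounding context are assumptions.
//
//   bool ValidateStructSupertype(const WasmModule* module, uint32_t type_index,
//                                uint32_t declared_supertype) {
//     // Same-module check; cross-module nominal subtyping is not supported.
//     return StructIsSubtypeOf(type_index, declared_supertype, module, module);
//   }
//
// ArrayIsSubtypeOf and FunctionIsSubtypeOf would be used analogously for
// array and function types.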
diff --git a/chromium/v8/src/web-snapshot/web-snapshot.cc b/chromium/v8/src/web-snapshot/web-snapshot.cc
index 5e8ae15c0b2..06a09ad6a4b 100644
--- a/chromium/v8/src/web-snapshot/web-snapshot.cc
+++ b/chromium/v8/src/web-snapshot/web-snapshot.cc
@@ -6,7 +6,11 @@
#include <limits>
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/handles/handles.h"
@@ -388,7 +392,7 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.location() != kField) {
+ if (details.location() != PropertyLocation::kField) {
Throw("Web snapshot: Properties which are not fields not supported");
return;
}
@@ -690,19 +694,16 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteUint32(ValueType::DOUBLE);
serializer.WriteDouble(HeapNumber::cast(*object).value());
break;
- case JS_FUNCTION_TYPE: {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- FunctionKind kind = function->shared().kind();
- if (IsClassConstructor(kind)) {
- SerializeClass(function, id);
- serializer.WriteUint32(ValueType::CLASS_ID);
- } else {
- SerializeFunction(function, id);
- serializer.WriteUint32(ValueType::FUNCTION_ID);
- }
+ case JS_FUNCTION_TYPE:
+ SerializeFunction(Handle<JSFunction>::cast(object), id);
+ serializer.WriteUint32(ValueType::FUNCTION_ID);
+ serializer.WriteUint32(id);
+ break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
+ SerializeClass(Handle<JSFunction>::cast(object), id);
+ serializer.WriteUint32(ValueType::CLASS_ID);
serializer.WriteUint32(id);
break;
- }
case JS_OBJECT_TYPE:
SerializeObject(Handle<JSObject>::cast(object), id);
serializer.WriteUint32(ValueType::OBJECT_ID);
@@ -720,9 +721,9 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
return;
}
uint32_t pattern_id, flags_id;
- Handle<String> pattern = handle(regexp->Pattern(), isolate_);
+ Handle<String> pattern = handle(regexp->source(), isolate_);
Handle<String> flags_string =
- JSRegExp::StringFromFlags(isolate_, regexp->GetFlags());
+ JSRegExp::StringFromFlags(isolate_, regexp->flags());
SerializeString(pattern, pattern_id);
SerializeString(flags_string, flags_id);
serializer.WriteUint32(ValueType::REGEXP);
@@ -1281,7 +1282,7 @@ void WebSnapshotDeserializer::DeserializeObjects() {
ReadValue(value, wanted_representation, property_array, i);
// Read the representation from the map.
PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
- CHECK_EQ(details.location(), kField);
+ CHECK_EQ(details.location(), PropertyLocation::kField);
CHECK_EQ(kData, details.kind());
Representation r = details.representation();
if (r.IsNone()) {
@@ -1513,15 +1514,14 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::REGEXP: {
Handle<String> pattern = ReadString(false);
Handle<String> flags_string = ReadString(false);
- bool success = false;
- JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate_, flags_string, &success);
- if (!success) {
+ base::Optional<JSRegExp::Flags> flags =
+ JSRegExp::FlagsFromString(isolate_, flags_string);
+ if (!flags.has_value()) {
Throw("Web snapshot: Malformed flags in regular expression");
return;
}
MaybeHandle<JSRegExp> maybe_regexp =
- JSRegExp::New(isolate_, pattern, flags);
+ JSRegExp::New(isolate_, pattern, flags.value());
if (!maybe_regexp.ToHandle(&value)) {
Throw("Web snapshot: Malformed RegExp");
return;
diff --git a/chromium/v8/src/zone/accounting-allocator.cc b/chromium/v8/src/zone/accounting-allocator.cc
index baa70162fd9..40b4756bd1d 100644
--- a/chromium/v8/src/zone/accounting-allocator.cc
+++ b/chromium/v8/src/zone/accounting-allocator.cc
@@ -54,7 +54,8 @@ std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
platform_allocator, reservation_start, ZoneCompression::kReservationSize,
- kZonePageSize);
+ kZonePageSize,
+ base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
// Exclude first page from allocation to ensure that accesses through
// decompressed null pointer will seg-fault.
@@ -65,7 +66,11 @@ std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
} // namespace
-AccountingAllocator::AccountingAllocator() {
+AccountingAllocator::AccountingAllocator()
+ : zone_backing_malloc_(
+ V8::GetCurrentPlatform()->GetZoneBackingAllocator()->GetMallocFn()),
+ zone_backing_free_(
+ V8::GetCurrentPlatform()->GetZoneBackingAllocator()->GetFreeFn()) {
if (COMPRESS_ZONES_BOOL) {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
VirtualMemory memory = ReserveAddressSpace(platform_page_allocator);
@@ -86,7 +91,7 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes,
kZonePageSize, PageAllocator::kReadWrite);
} else {
- memory = AllocWithRetry(bytes);
+ memory = AllocWithRetry(bytes, zone_backing_malloc_);
}
if (memory == nullptr) return nullptr;
@@ -110,7 +115,7 @@ void AccountingAllocator::ReturnSegment(Segment* segment,
if (COMPRESS_ZONES_BOOL && supports_compression) {
CHECK(FreePages(bounded_page_allocator_.get(), segment, segment_size));
} else {
- base::Free(segment);
+ zone_backing_free_(segment);
}
}
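// Illustrative sketch (hypothetical, not from the patch): with this change the
// zone segment malloc/free hooks are fetched once from the platform, so an
// embedder can route zone backing memory through its own allocator. The
// my_alloc/my_free names are placeholders; exact override signatures follow
// include/v8-platform.h.
//
//   class MyZoneBackingAllocator final : public v8::ZoneBackingAllocator {
//    public:
//     MallocFn GetMallocFn() const override { return &my_alloc; }
//     FreeFn GetFreeFn() const override { return &my_free; }
//   };
//
//   class MyPlatform final : public v8::Platform {
//    public:
//     v8::ZoneBackingAllocator* GetZoneBackingAllocator() override {
//       static MyZoneBackingAllocator allocator;
//       return &allocator;
//     }
//     // ... remaining v8::Platform overrides ...
//   };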
diff --git a/chromium/v8/src/zone/accounting-allocator.h b/chromium/v8/src/zone/accounting-allocator.h
index 88a0c755971..7fdc86da7d2 100644
--- a/chromium/v8/src/zone/accounting-allocator.h
+++ b/chromium/v8/src/zone/accounting-allocator.h
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
+#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/logging/tracing-flags.h"
@@ -71,6 +72,9 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
std::unique_ptr<VirtualMemory> reserved_area_;
std::unique_ptr<base::BoundedPageAllocator> bounded_page_allocator_;
+
+ ZoneBackingAllocator::MallocFn zone_backing_malloc_ = nullptr;
+ ZoneBackingAllocator::FreeFn zone_backing_free_ = nullptr;
};
} // namespace internal
diff --git a/chromium/v8/src/zone/zone.cc b/chromium/v8/src/zone/zone.cc
index 57f198e9aa7..42617aadb8c 100644
--- a/chromium/v8/src/zone/zone.cc
+++ b/chromium/v8/src/zone/zone.cc
@@ -89,14 +89,8 @@ void Zone::DeleteAll() {
// Traverse the chained list of segments and return them all to the allocator.
while (current) {
Segment* next = current->next();
- size_t size = current->total_size();
-
- // Un-poison the segment content so we can re-use or zap it later.
- ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
- current->capacity());
-
- segment_bytes_allocated_ -= size;
- allocator_->ReturnSegment(current, supports_compression());
+ segment_bytes_allocated_ -= current->total_size();
+ ReleaseSegment(current);
current = next;
}
@@ -107,6 +101,13 @@ void Zone::DeleteAll() {
#endif
}
+void Zone::ReleaseSegment(Segment* segment) {
+ // Un-poison the segment content so we can re-use or zap it later.
+ ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
+ segment->capacity());
+ allocator_->ReturnSegment(segment, supports_compression());
+}
+
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
@@ -168,5 +169,48 @@ Address Zone::NewExpand(size_t size) {
return result;
}
+ZoneScope::ZoneScope(Zone* zone)
+ : zone_(zone),
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ allocation_size_for_tracing_(zone->allocation_size_for_tracing_),
+ freed_size_for_tracing_(zone->freed_size_for_tracing_),
+#endif
+ allocation_size_(zone->allocation_size_),
+ segment_bytes_allocated_(zone->segment_bytes_allocated_),
+ position_(zone->position_),
+ limit_(zone->limit_),
+ segment_head_(zone->segment_head_) {
+}
+
+ZoneScope::~ZoneScope() {
+ // Release segments up to the stored segment_head_.
+ Segment* current = zone_->segment_head_;
+ while (current != segment_head_) {
+ Segment* next = current->next();
+ zone_->ReleaseSegment(current);
+ current = next;
+ }
+
+ // Un-poison the trailing segment content so we can re-use or zap it later.
+ if (segment_head_ != nullptr) {
+ void* const start = reinterpret_cast<void*>(position_);
+ DCHECK_GE(start, reinterpret_cast<void*>(current->start()));
+ DCHECK_LE(start, reinterpret_cast<void*>(current->end()));
+ const size_t length = current->end() - reinterpret_cast<Address>(start);
+ ASAN_UNPOISON_MEMORY_REGION(start, length);
+ }
+
+ // Reset the Zone to the stored state.
+ zone_->allocation_size_ = allocation_size_;
+ zone_->segment_bytes_allocated_ = segment_bytes_allocated_;
+ zone_->position_ = position_;
+ zone_->limit_ = limit_;
+ zone_->segment_head_ = segment_head_;
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ zone_->allocation_size_for_tracing_ = allocation_size_for_tracing_;
+ zone_->freed_size_for_tracing_ = freed_size_for_tracing_;
+#endif
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/zone/zone.h b/chromium/v8/src/zone/zone.h
index aa76091621b..ef2f0b3dc84 100644
--- a/chromium/v8/src/zone/zone.h
+++ b/chromium/v8/src/zone/zone.h
@@ -189,6 +189,10 @@ class V8_EXPORT_PRIVATE Zone final {
// Deletes all objects and frees all memory allocated in the Zone.
void DeleteAll();
+ // Releases the current segment without performing any local bookkeeping
+ // (e.g. tracking allocated bytes, maintaining linked lists, etc).
+ void ReleaseSegment(Segment* segment);
+
// All pointers returned from New() are 8-byte aligned.
static const size_t kAlignmentInBytes = 8;
@@ -235,6 +239,30 @@ class V8_EXPORT_PRIVATE Zone final {
// The number of bytes freed in this zone so far.
size_t freed_size_for_tracing_ = 0;
#endif
+
+ friend class ZoneScope;
+};
+
+// Similar to the HandleScope, the ZoneScope defines a region of validity for
+// zone memory. All memory allocated in the given Zone during the scope's
+// lifetime is freed when the scope is destructed, i.e. the Zone is reset to
+// the state it was in when the scope was created.
+class ZoneScope final {
+ public:
+ explicit ZoneScope(Zone* zone);
+ ~ZoneScope();
+
+ private:
+ Zone* const zone_;
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ const size_t allocation_size_for_tracing_;
+ const size_t freed_size_for_tracing_;
+#endif
+ const size_t allocation_size_;
+ const size_t segment_bytes_allocated_;
+ const Address position_;
+ const Address limit_;
+ Segment* const segment_head_;
};
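// Illustrative sketch (hypothetical, not from the patch): minimal ZoneScope
// usage, assuming a Zone is already available. Allocations made inside the
// scope are released when it ends; earlier allocations are unaffected.
//
//   void Example(Zone* zone) {
//     int* outlives = zone->New<int>(1);      // not affected by the scope below
//     {
//       ZoneScope scope(zone);
//       int* temporary = zone->New<int>(2);   // freed when |scope| is destructed
//       *temporary = *outlives;
//     }
//   }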
// ZoneObject is an abstraction that helps define classes of objects
diff --git a/chromium/v8/test/cctest/BUILD.gn b/chromium/v8/test/cctest/BUILD.gn
index e7f011df74d..351292ba283 100644
--- a/chromium/v8/test/cctest/BUILD.gn
+++ b/chromium/v8/test/cctest/BUILD.gn
@@ -89,6 +89,7 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/node-observer-tester.h",
+ "compiler/test-atomic-load-store-codegen.cc",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-calls-with-arraylike-or-spread.cc",
@@ -119,7 +120,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-jsops.cc",
"compiler/test-run-load-store.cc",
"compiler/test-run-machops.cc",
- "compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
"compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
@@ -290,6 +290,7 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
+ "test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
@@ -307,7 +308,6 @@ v8_source_set("cctest_sources") {
"test-assembler-arm.cc",
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
- "test-poison-disasm-arm.cc",
"test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
@@ -319,7 +319,6 @@ v8_source_set("cctest_sources") {
"test-js-arm64-variables.cc",
"test-macro-assembler-arm64.cc",
"test-pointer-auth-arm64.cc",
- "test-poison-disasm-arm64.cc",
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
@@ -385,6 +384,12 @@ v8_source_set("cctest_sources") {
"test-macro-assembler-riscv64.cc",
"test-simple-riscv64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "test-assembler-loong64.cc",
+ "test-disasm-loong64.cc",
+ "test-macro-assembler-loong64.cc",
+ ]
}
if (v8_use_perfetto) {
@@ -482,7 +487,7 @@ v8_source_set("cctest_sources") {
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
- v8_current_cpu == "riscv64") {
+ v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
if (!is_win) {
diff --git a/chromium/v8/test/torque/test-torque.tq b/chromium/v8/test/torque/test-torque.tq
index 1e11465f5a4..1fc6f062b4c 100644
--- a/chromium/v8/test/torque/test-torque.tq
+++ b/chromium/v8/test/torque/test-torque.tq
@@ -40,20 +40,20 @@ macro LabelTestHelper3(): never
}
@export
-macro TestConstexpr1() {
+macro TestConstexpr1(): void {
check(FromConstexpr<bool>(
IsFastElementsKind(ElementsKind::PACKED_SMI_ELEMENTS)));
}
@export
-macro TestConstexprIf() {
+macro TestConstexprIf(): void {
check(ElementsKindTestHelper1(ElementsKind::UINT8_ELEMENTS));
check(ElementsKindTestHelper1(ElementsKind::UINT16_ELEMENTS));
check(!ElementsKindTestHelper1(ElementsKind::UINT32_ELEMENTS));
}
@export
-macro TestConstexprReturn() {
+macro TestConstexprReturn(): void {
check(FromConstexpr<bool>(
ElementsKindTestHelper2(ElementsKind::UINT8_ELEMENTS)));
check(FromConstexpr<bool>(
@@ -103,7 +103,7 @@ GenericBuiltinTest<JSAny>(param: JSAny): JSAny {
}
@export
-macro TestBuiltinSpecialization() {
+macro TestBuiltinSpecialization(): void {
check(GenericBuiltinTest<Smi>(0) == Null);
check(GenericBuiltinTest<Smi>(1) == Null);
check(GenericBuiltinTest<JSAny>(Undefined) == Undefined);
@@ -160,7 +160,7 @@ GenericMacroTestWithLabels<Object>(param2: Object): Object
}
@export
-macro TestMacroSpecialization() {
+macro TestMacroSpecialization(): void {
try {
const _smi0: Smi = 0;
check(GenericMacroTest<Smi>(0) == Undefined);
@@ -208,7 +208,7 @@ macro TestTernaryOperator(x: Smi): Smi {
}
@export
-macro TestFunctionPointerToGeneric() {
+macro TestFunctionPointerToGeneric(): void {
const fptr1: builtin(Smi) => JSAny = GenericBuiltinTest<Smi>;
const fptr2: builtin(JSAny) => JSAny = GenericBuiltinTest<JSAny>;
@@ -236,19 +236,19 @@ macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
}
@export
-macro TestHexLiteral() {
+macro TestHexLiteral(): void {
check(Convert<intptr>(0xffff) + 1 == 0x10000);
check(Convert<intptr>(-0xffff) == -65535);
}
@export
-macro TestLargeIntegerLiterals(implicit c: Context)() {
+macro TestLargeIntegerLiterals(implicit c: Context)(): void {
let _x: int32 = 0x40000000;
let _y: int32 = 0x7fffffff;
}
@export
-macro TestMultilineAssert() {
+macro TestMultilineAssert(): void {
const someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
check(
someVeryLongVariableNameThatWillCauseLineBreaks > 0 &&
@@ -256,7 +256,7 @@ macro TestMultilineAssert() {
}
@export
-macro TestNewlineInString() {
+macro TestNewlineInString(): void {
Print('Hello, World!\n');
}
@@ -265,14 +265,14 @@ const kIntptrConst: intptr = 4;
const kSmiConst: Smi = 3;
@export
-macro TestModuleConstBindings() {
+macro TestModuleConstBindings(): void {
check(kConstexprConst == Int32Constant(5));
check(kIntptrConst == 4);
check(kSmiConst == 3);
}
@export
-macro TestLocalConstBindings() {
+macro TestLocalConstBindings(): void {
const x: constexpr int31 = 3;
const xSmi: Smi = x;
{
@@ -347,7 +347,7 @@ Foo(TestStructA) {
goto Foo(TestStruct2());
}
@export // Silence unused warning.
-macro CallTestStructInLabel(implicit context: Context)() {
+macro CallTestStructInLabel(implicit context: Context)(): void {
try {
TestStructInLabel() otherwise Foo;
} label Foo(_s: TestStructA) {}
@@ -356,7 +356,7 @@ macro CallTestStructInLabel(implicit context: Context)() {
// This macro tests different versions of the for-loop where some parts
// are (not) present.
@export
-macro TestForLoop() {
+macro TestForLoop(): void {
let sum: Smi = 0;
for (let i: Smi = 0; i < 5; ++i) sum += i;
check(sum == 10);
@@ -455,7 +455,7 @@ macro TestForLoop() {
}
@export
-macro TestSubtyping(x: Smi) {
+macro TestSubtyping(x: Smi): void {
const _foo: JSAny = x;
}
@@ -501,7 +501,7 @@ macro TypeswitchExample(implicit context: Context)(x: NumberOrFixedArray):
}
@export
-macro TestTypeswitch(implicit context: Context)() {
+macro TestTypeswitch(implicit context: Context)(): void {
check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
const a: FixedArray = AllocateZeroedFixedArray(3);
check(TypeswitchExample(a) == 13);
@@ -509,7 +509,8 @@ macro TestTypeswitch(implicit context: Context)() {
}
@export
-macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
+macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object):
+ void {
typeswitch (obj) {
case (_o: Smi): {
}
@@ -530,7 +531,7 @@ macro ExampleGenericOverload<A: type>(o: Smi): A {
}
@export
-macro TestGenericOverload(implicit context: Context)() {
+macro TestGenericOverload(implicit context: Context)(): void {
const xSmi: Smi = 5;
const xObject: Object = xSmi;
check(ExampleGenericOverload<Smi>(xSmi) == 6);
@@ -538,7 +539,7 @@ macro TestGenericOverload(implicit context: Context)() {
}
@export
-macro TestEquality(implicit context: Context)() {
+macro TestEquality(implicit context: Context)(): void {
const notEqual: bool =
AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
check(!notEqual);
@@ -558,7 +559,7 @@ macro TestAndOr(x: bool, y: bool, z: bool): bool {
}
@export
-macro TestLogicalOperators() {
+macro TestLogicalOperators(): void {
check(TestAndOr(true, true, true));
check(TestAndOr(true, true, false));
check(TestAndOr(true, false, true));
@@ -584,7 +585,7 @@ macro TestCall(i: Smi): Smi labels A {
}
@export
-macro TestOtherwiseWithCode1() {
+macro TestOtherwiseWithCode1(): void {
let v: Smi = 0;
let s: Smi = 1;
try {
@@ -592,41 +593,41 @@ macro TestOtherwiseWithCode1() {
} label B(v1: Smi) {
v = v1;
}
- assert(v == 2);
+ dcheck(v == 2);
}
@export
-macro TestOtherwiseWithCode2() {
+macro TestOtherwiseWithCode2(): void {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
TestCall(i) otherwise break;
++s;
}
- assert(s == 5);
+ dcheck(s == 5);
}
@export
-macro TestOtherwiseWithCode3() {
+macro TestOtherwiseWithCode3(): void {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
s += TestCall(i) otherwise break;
}
- assert(s == 10);
+ dcheck(s == 10);
}
@export
-macro TestForwardLabel() {
+macro TestForwardLabel(): void {
try {
goto A;
} label A {
goto B(5);
} label B(b: Smi) {
- assert(b == 5);
+ dcheck(b == 5);
}
}
@export
-macro TestQualifiedAccess(implicit context: Context)() {
+macro TestQualifiedAccess(implicit context: Context)(): void {
const s: Smi = 0;
check(!Is<JSArray>(s));
}
@@ -683,7 +684,7 @@ macro TestCatch3(implicit context: Context)(): Smi {
// iterator.tq.
@export
transitioning macro TestIterator(implicit context: Context)(
- o: JSReceiver, map: Map) {
+ o: JSReceiver, map: Map): void {
try {
const t1: JSAny = iterator::GetIteratorMethod(o);
const t2: iterator::IteratorRecord = iterator::GetIterator(o);
@@ -701,12 +702,12 @@ transitioning macro TestIterator(implicit context: Context)(
}
@export
-macro TestFrame1(implicit context: Context)() {
+macro TestFrame1(implicit context: Context)(): void {
const f: Frame = LoadFramePointer();
const frameType: FrameType =
Cast<FrameType>(f.context_or_frame_type) otherwise unreachable;
- assert(frameType == STUB_FRAME);
- assert(f.caller == LoadParentFramePointer());
+ dcheck(frameType == STUB_FRAME);
+ dcheck(f.caller == LoadParentFramePointer());
typeswitch (f) {
case (_f: StandardFrame): {
unreachable;
@@ -717,14 +718,14 @@ macro TestFrame1(implicit context: Context)() {
}
@export
-macro TestNew(implicit context: Context)() {
+macro TestNew(implicit context: Context)(): void {
const f: JSArray = NewJSArray();
check(f.IsEmpty());
f.length = 0;
}
struct TestInner {
- macro SetX(newValue: int32) {
+ macro SetX(newValue: int32): void {
this.x = newValue;
}
macro GetX(): int32 {
@@ -741,7 +742,7 @@ struct TestOuter {
}
@export
-macro TestStructConstructor(implicit context: Context)() {
+macro TestStructConstructor(implicit context: Context)(): void {
// Test default constructor
let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
check(a.a == 5);
@@ -756,7 +757,7 @@ macro TestStructConstructor(implicit context: Context)() {
}
class InternalClass extends HeapObject {
- macro Flip() labels NotASmi {
+ macro Flip(): void labels NotASmi {
const tmp = Cast<Smi>(this.b) otherwise NotASmi;
this.b = this.a;
this.a = tmp;
@@ -770,7 +771,7 @@ macro NewInternalClass(x: Smi): InternalClass {
}
@export
-macro TestInternalClass(implicit context: Context)() {
+macro TestInternalClass(implicit context: Context)(): void {
const o = NewInternalClass(5);
o.Flip() otherwise unreachable;
check(o.a == 6);
@@ -789,7 +790,7 @@ struct StructWithConst {
}
@export
-macro TestConstInStructs() {
+macro TestConstInStructs(): void {
const x = StructWithConst{a: Null, b: 1};
let y = StructWithConst{a: Null, b: 1};
y.a = Undefined;
@@ -800,7 +801,7 @@ macro TestConstInStructs() {
}
@export
-macro TestParentFrameArguments(implicit context: Context)() {
+macro TestParentFrameArguments(implicit context: Context)(): void {
const parentFrame = LoadParentFramePointer();
const castFrame = Cast<StandardFrame>(parentFrame) otherwise unreachable;
const arguments = GetFrameArguments(castFrame, 1);
@@ -829,14 +830,14 @@ class SmiPair extends HeapObject {
b: Smi;
}
-macro Swap<T: type>(a:&T, b:&T) {
+macro Swap<T: type>(a:&T, b:&T): void {
const tmp = *a;
*a = *b;
*b = tmp;
}
@export
-macro TestReferences() {
+macro TestReferences(): void {
const array = new SmiPair{a: 7, b: 2};
const ref:&Smi = &array.a;
*ref = 3 + *ref;
@@ -847,7 +848,7 @@ macro TestReferences() {
}
@export
-macro TestSlices() {
+macro TestSlices(): void {
const it = TestIterator{count: 3};
const a = new FixedArray{map: kFixedArrayMap, length: 3, objects: ...it};
check(a.length == 3);
@@ -904,7 +905,7 @@ macro TestSliceEnumeration(implicit context: Context)(): Undefined {
}
@export
-macro TestStaticAssert() {
+macro TestStaticAssert(): void {
static_assert(1 + 2 == 3);
static_assert(Convert<uintptr>(5) < Convert<uintptr>(6));
@@ -932,7 +933,7 @@ builtin NewSmiBox(implicit context: Context)(value: Smi): SmiBox {
}
@export
-macro TestLoadEliminationFixed(implicit context: Context)() {
+macro TestLoadEliminationFixed(implicit context: Context)(): void {
const box = NewSmiBox(123);
const v1 = box.value;
box.unrelated = 999;
@@ -946,7 +947,7 @@ macro TestLoadEliminationFixed(implicit context: Context)() {
}
@export
-macro TestLoadEliminationVariable(implicit context: Context)() {
+macro TestLoadEliminationVariable(implicit context: Context)(): void {
const a = UnsafeCast<FixedArray>(kEmptyFixedArray);
const box = NewSmiBox(1);
const v1 = a.objects[box.value];
@@ -1035,7 +1036,8 @@ macro BranchAndWriteResult(x: Smi, box: SmiBox): bool {
}
@export
-macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi) {
+macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi):
+ void {
const box = NewSmiBox(1);
// If the two branches get combined into one, we should be able to determine
// the value of {box} statically.
@@ -1056,7 +1058,7 @@ bitfield struct TestBitFieldStruct extends uint8 {
@export
macro TestBitFieldLoad(
val: TestBitFieldStruct, expectedA: bool, expectedB: uint16,
- expectedC: uint32, expectedD: bool) {
+ expectedC: uint32, expectedD: bool): void {
check(val.a == expectedA);
check(val.b == expectedB);
check(val.c == expectedC);
@@ -1064,7 +1066,7 @@ macro TestBitFieldLoad(
}
@export
-macro TestBitFieldStore(val: TestBitFieldStruct) {
+macro TestBitFieldStore(val: TestBitFieldStruct): void {
let val: TestBitFieldStruct = val; // Get a mutable local copy.
const a: bool = val.a;
const b: uint16 = val.b;
@@ -1083,7 +1085,7 @@ macro TestBitFieldStore(val: TestBitFieldStruct) {
}
@export
-macro TestBitFieldInit(a: bool, b: uint16, c: uint32, d: bool) {
+macro TestBitFieldInit(a: bool, b: uint16, c: uint32, d: bool): void {
const val: TestBitFieldStruct = TestBitFieldStruct{a: a, b: b, c: c, d: d};
TestBitFieldLoad(val, a, b, c, d);
}
@@ -1102,7 +1104,7 @@ bitfield struct TestBitFieldStruct3 extends uintptr {
@export
macro TestBitFieldUintptrOps(
- val2: TestBitFieldStruct2, val3: TestBitFieldStruct3) {
+ val2: TestBitFieldStruct2, val3: TestBitFieldStruct3): void {
let val2: TestBitFieldStruct2 = val2; // Get a mutable local copy.
let val3: TestBitFieldStruct3 = val3; // Get a mutable local copy.
@@ -1142,7 +1144,7 @@ bitfield struct TestBitFieldStruct5 extends uint31 {
}
@export
-macro TestBitFieldMultipleFlags(a: bool, b: int32, c: bool) {
+macro TestBitFieldMultipleFlags(a: bool, b: int32, c: bool): void {
const f = TestBitFieldStruct4{a: a, b: b, c: c};
let simpleExpression = f.a & f.b == 3 & !f.c;
let expectedReduction = (Signed(f) & 0x1f) == Convert<int32>(1 | 3 << 1);
@@ -1222,7 +1224,7 @@ struct InternalClassStructElementGeneratorIterator {
}
@export
-macro TestFullyGeneratedClassWithElements() {
+macro TestFullyGeneratedClassWithElements(): void {
// Test creation, initialization and access of a fully generated class with
// simple (Smi) elements
const length: Smi = Convert<Smi>(3);
@@ -1234,12 +1236,12 @@ macro TestFullyGeneratedClassWithElements() {
value: 11
}
};
- assert(object1.length == 3);
- assert(object1.data == 0);
- assert(object1.object == Undefined);
- assert(object1.entries[0] == 11);
- assert(object1.entries[1] == 12);
- assert(object1.entries[2] == 13);
+ dcheck(object1.length == 3);
+ dcheck(object1.data == 0);
+ dcheck(object1.object == Undefined);
+ dcheck(object1.entries[0] == 11);
+ dcheck(object1.entries[1] == 12);
+ dcheck(object1.entries[2] == 13);
// Test creation, initialization and access of a fully generated class
// with elements that are a struct.
@@ -1255,20 +1257,20 @@ macro TestFullyGeneratedClassWithElements() {
}
};
- assert(object2.dummy1 == 44);
- assert(object2.dummy2 == 45);
- assert(object2.count == 3);
- assert(object2.data == 55);
- assert(object2.object == Undefined);
- assert(object2.entries[0] == 3);
- assert(object2.entries[1] == 4);
- assert(object2.entries[2] == 5);
- assert(object2.more_entries[0].a == 1);
- assert(object2.more_entries[0].b == 2);
- assert(object2.more_entries[1].a == 3);
- assert(object2.more_entries[1].b == 4);
- assert(object2.more_entries[2].a == 5);
- assert(object2.more_entries[2].b == 6);
+ dcheck(object2.dummy1 == 44);
+ dcheck(object2.dummy2 == 45);
+ dcheck(object2.count == 3);
+ dcheck(object2.data == 55);
+ dcheck(object2.object == Undefined);
+ dcheck(object2.entries[0] == 3);
+ dcheck(object2.entries[1] == 4);
+ dcheck(object2.entries[2] == 5);
+ dcheck(object2.more_entries[0].a == 1);
+ dcheck(object2.more_entries[0].b == 2);
+ dcheck(object2.more_entries[1].a == 3);
+ dcheck(object2.more_entries[1].b == 4);
+ dcheck(object2.more_entries[2].a == 5);
+ dcheck(object2.more_entries[2].b == 6);
}
@export
@@ -1285,7 +1287,7 @@ class ExportedSubClass2 extends ExportedSubClassBase {
}
@export
-macro TestGeneratedCastOperators(implicit context: Context)() {
+macro TestGeneratedCastOperators(implicit context: Context)(): void {
const a = new
ExportedSubClass{a: Null, b: Null, c_field: 3, d_field: 4, e_field: 5};
const b = new ExportedSubClassBase{a: Undefined, b: Null};
@@ -1294,39 +1296,39 @@ macro TestGeneratedCastOperators(implicit context: Context)() {
const aO: Object = a;
const bO: Object = b;
const cO: Object = c;
- assert(Is<ExportedSubClassBase>(aO));
- assert(Is<ExportedSubClass>(aO));
- assert(!Is<ExportedSubClass2>(aO));
- assert(Is<ExportedSubClassBase>(bO));
- assert(!Is<ExportedSubClass>(bO));
- assert(Is<ExportedSubClassBase>(cO));
- assert(!Is<ExportedSubClass>(cO));
- assert(Is<ExportedSubClass2>(cO));
+ dcheck(Is<ExportedSubClassBase>(aO));
+ dcheck(Is<ExportedSubClass>(aO));
+ dcheck(!Is<ExportedSubClass2>(aO));
+ dcheck(Is<ExportedSubClassBase>(bO));
+ dcheck(!Is<ExportedSubClass>(bO));
+ dcheck(Is<ExportedSubClassBase>(cO));
+ dcheck(!Is<ExportedSubClass>(cO));
+ dcheck(Is<ExportedSubClass2>(cO));
const jsf: JSFunction =
*NativeContextSlot(ContextSlot::REGEXP_FUNCTION_INDEX);
- assert(!Is<JSSloppyArgumentsObject>(jsf));
+ dcheck(!Is<JSSloppyArgumentsObject>(jsf));
const parameterValues = NewFixedArray(0, ConstantIterator(TheHole));
const elements = NewSloppyArgumentsElements(
0, context, parameterValues, ConstantIterator(TheHole));
const fastArgs = arguments::NewJSFastAliasedArgumentsObject(
elements, Convert<Smi>(0), jsf);
- assert(Is<JSArgumentsObject>(fastArgs));
+ dcheck(Is<JSArgumentsObject>(fastArgs));
}
extern runtime InYoungGeneration(implicit context: Context)(HeapObject):
Boolean;
@export
-macro TestNewPretenured(implicit context: Context)() {
+macro TestNewPretenured(implicit context: Context)(): void {
const obj = new (Pretenured) ExportedSubClassBase{a: Undefined, b: Null};
- assert(Is<ExportedSubClassBase>(obj));
- assert(InYoungGeneration(obj) == False);
+ dcheck(Is<ExportedSubClassBase>(obj));
+ dcheck(InYoungGeneration(obj) == False);
}
@export
-macro TestWord8Phi() {
+macro TestWord8Phi(): void {
for (let i: intptr = -5; i < 5; ++i) {
let x: int8;
if (i == -1) {
@@ -1339,7 +1341,7 @@ macro TestWord8Phi() {
}
@export
-macro TestOffHeapSlice(ptr: RawPtr<char8>, length: intptr) {
+macro TestOffHeapSlice(ptr: RawPtr<char8>, length: intptr): void {
const string = UnsafeCast<SeqOneByteString>(Convert<String>('Hello World!'));
check(*torque_internal::unsafe::NewOffHeapReference(ptr) == string.chars[0]);
@@ -1362,7 +1364,7 @@ builtin ReturnTwoValues(implicit context: Context)(
}
@export
-macro TestCallMultiReturnBuiltin(implicit context: Context)() {
+macro TestCallMultiReturnBuiltin(implicit context: Context)(): void {
const result = ReturnTwoValues(444, FromConstexpr<String>('hi'));
check(result.a == 445);
check(result.b == FromConstexpr<String>('hi').map);
@@ -1388,7 +1390,7 @@ macro AddSmiAndConstexprValues(a: Smi, b: constexpr int31): Smi {
}
@export
-macro TestCreateLazyNodeFromTorque() {
+macro TestCreateLazyNodeFromTorque(): void {
const lazy = %MakeLazy<Smi>('GetLazySmi');
const result = TestRunLazyTwice(lazy);
check(result == 6);
diff --git a/chromium/v8/test/unittests/BUILD.gn b/chromium/v8/test/unittests/BUILD.gn
index 53319442419..e2ea833cf9b 100644
--- a/chromium/v8/test/unittests/BUILD.gn
+++ b/chromium/v8/test/unittests/BUILD.gn
@@ -119,6 +119,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/object-start-bitmap-unittest.cc",
"heap/cppgc/page-memory-unittest.cc",
"heap/cppgc/persistent-family-unittest.cc",
+ "heap/cppgc/platform-unittest.cc",
"heap/cppgc/prefinalizer-unittest.cc",
"heap/cppgc/sanitizer-unittest.cc",
"heap/cppgc/source-location-unittest.cc",
@@ -305,6 +306,7 @@ v8_source_set("unittests_sources") {
"debug/debug-property-iterator-unittest.cc",
"diagnostics/eh-frame-iterator-unittest.cc",
"diagnostics/eh-frame-writer-unittest.cc",
+ "diagnostics/gdb-jit-unittest.cc",
"execution/microtask-queue-unittest.cc",
"heap/allocation-observer-unittest.cc",
"heap/barrier-unittest.cc",
@@ -327,6 +329,7 @@ v8_source_set("unittests_sources") {
"heap/memory-reducer-unittest.cc",
"heap/object-stats-unittest.cc",
"heap/persistent-handles-unittest.cc",
+ "heap/progressbar-unittest.cc",
"heap/safepoint-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
@@ -413,6 +416,7 @@ v8_source_set("unittests_sources") {
"wasm/leb-helper-unittest.cc",
"wasm/liftoff-register-unittests.cc",
"wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/memory-protection-unittest.cc",
"wasm/module-decoder-memory64-unittest.cc",
"wasm/module-decoder-unittest.cc",
"wasm/simd-shuffle-unittest.cc",
@@ -482,20 +486,28 @@ v8_source_set("unittests_sources") {
"assembler/turbo-assembler-s390-unittest.cc",
"compiler/s390/instruction-selector-s390-unittest.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [
+ "assembler/turbo-assembler-loong64-unittest.cc",
+ "compiler/loong64/instruction-selector-loong64-unittest.cc",
+ ]
}
- if (is_posix && v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-posix-unittest.cc" ]
- }
+ if (v8_enable_webassembly) {
+ if (is_posix) {
+ sources += [ "wasm/trap-handler-posix-unittest.cc" ]
+ }
- if (is_win && v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-win-unittest.cc" ]
- }
+ if (is_win) {
+ sources += [ "wasm/trap-handler-win-unittest.cc" ]
+ }
- # Include this test only on arm64 simulator builds on x64 on Linux.
- if (current_cpu == "x64" && v8_current_cpu == "arm64" && is_linux &&
- v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-simulator-unittest.cc" ]
+ # Include this test only on arm64 simulator builds on x64 on Linux, Mac and
+ # Windows.
+ if (current_cpu == "x64" && v8_current_cpu == "arm64" &&
+ (is_linux || is_mac || is_win)) {
+ sources += [ "wasm/trap-handler-simulator-unittest.cc" ]
+ }
}
configs = [
diff --git a/chromium/v8/testing/gtest-support.h b/chromium/v8/testing/gtest-support.h
index ba0e2f41f99..21c8ebf4d3f 100644
--- a/chromium/v8/testing/gtest-support.h
+++ b/chromium/v8/testing/gtest-support.h
@@ -36,21 +36,20 @@ GET_TYPE_NAME(double)
// |var| while inside the loop body.
#define TRACED_FOREACH(_type, _var, _container) \
for (_type const _var : _container) \
- for (bool _done = false; !_done;) \
+ for (bool _var##_done = false; !_var##_done;) \
for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
- !_done; _done = true)
-
+ !_var##_done; _var##_done = true)
// TRACED_FORRANGE(type, var, low, high) expands to a loop that assigns |var|
// every value in the range |low| to |high| (inclusive) and adds a
// SCOPED_TRACE() message for the |var| while inside the loop body.
// TODO(bmeurer): Migrate to C++11 once we're ready.
#define TRACED_FORRANGE(_type, _var, _low, _high) \
- for (_type _i = _low; _i <= _high; ++_i) \
- for (bool _done = false; !_done;) \
- for (_type const _var = _i; !_done;) \
+ for (_type _var##_i = _low; _var##_i <= _high; ++_var##_i) \
+ for (bool _var##_done = false; !_var##_done;) \
+ for (_type const _var = _var##_i; !_var##_done;) \
for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
- !_done; _done = true)
+ !_var##_done; _var##_done = true)
} // namespace internal
} // namespace testing
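// Illustrative sketch (hypothetical, not from the patch): the _var##-prefixed
// helper variables give each loop its own bookkeeping names, so the macros can
// now be nested without the inner loop shadowing the outer |_done|/|_i|:
//
//   TEST(TracedMacros, Nested) {
//     TRACED_FORRANGE(int, i, 0, 3) {
//       TRACED_FORRANGE(int, j, 0, 3) {  // previously reused the same |_done|/|_i|
//         EXPECT_LE(0, i + j);
//       }
//     }
//   }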
diff --git a/chromium/v8/third_party/v8/builtins/array-sort.tq b/chromium/v8/third_party/v8/builtins/array-sort.tq
index 334bc44922a..e63c2240658 100644
--- a/chromium/v8/third_party/v8/builtins/array-sort.tq
+++ b/chromium/v8/third_party/v8/builtins/array-sort.tq
@@ -20,7 +20,7 @@ class SortState extends HeapObject {
return sortCompare(context, this.userCmpFn, x, y);
}
- macro CheckAccessor(implicit context: Context)() labels Bailout {
+ macro CheckAccessor(implicit context: Context)(): void labels Bailout {
if (!IsFastJSArray(this.receiver, context)) goto Bailout;
const canUseSameAccessorFn: CanUseSameAccessorFn =
@@ -33,7 +33,7 @@ class SortState extends HeapObject {
}
}
- macro ResetToGenericAccessor() {
+ macro ResetToGenericAccessor(): void {
this.loadFn = Load<GenericElementsAccessor>;
this.storeFn = Store<GenericElementsAccessor>;
this.deleteFn = Delete<GenericElementsAccessor>;
@@ -307,7 +307,7 @@ transitioning builtin Delete<ElementsAccessor : type extends ElementsKind>(
Delete<FastSmiElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
@@ -317,7 +317,7 @@ Delete<FastSmiElements>(
Delete<FastObjectElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
@@ -327,7 +327,7 @@ Delete<FastObjectElements>(
Delete<FastDoubleElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedDoubleArray>(object.elements);
@@ -337,7 +337,7 @@ Delete<FastDoubleElements>(
transitioning builtin SortCompareDefault(
context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn == Undefined);
+ dcheck(comparefn == Undefined);
if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
return SmiLexicographicCompare(UnsafeCast<Smi>(x), UnsafeCast<Smi>(y));
@@ -365,7 +365,7 @@ transitioning builtin SortCompareDefault(
transitioning builtin SortCompareUserFn(
context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn != Undefined);
+ dcheck(comparefn != Undefined);
const cmpfn = UnsafeCast<Callable>(comparefn);
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
@@ -383,7 +383,7 @@ builtin CanUseSameAccessor<ElementsAccessor : type extends ElementsKind>(
initialReceiverLength: Number): Boolean {
if (receiver.map != initialReceiverMap) return False;
- assert(TaggedIsSmi(initialReceiverLength));
+ dcheck(TaggedIsSmi(initialReceiverLength));
const array = UnsafeCast<JSArray>(receiver);
const originalLength = UnsafeCast<Smi>(initialReceiverLength);
@@ -401,7 +401,7 @@ CanUseSameAccessor<GenericElementsAccessor>(
// for easier invariant checks at all use sites.
macro GetPendingRunsSize(implicit context: Context)(sortState: SortState): Smi {
const stackSize: Smi = sortState.pendingRunsSize;
- assert(stackSize >= 0);
+ dcheck(stackSize >= 0);
return stackSize;
}
@@ -410,7 +410,7 @@ macro GetPendingRunBase(implicit context: Context)(
return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
}
-macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
+macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi): void {
pendingRuns.objects[run << 1] = value;
}
@@ -419,13 +419,13 @@ macro GetPendingRunLength(implicit context: Context)(
return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
}
-macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
+macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi): void {
pendingRuns.objects[(run << 1) + 1] = value;
}
macro PushRun(implicit context: Context)(
- sortState: SortState, base: Smi, length: Smi) {
- assert(GetPendingRunsSize(sortState) < kMaxMergePending);
+ sortState: SortState, base: Smi, length: Smi): void {
+ dcheck(GetPendingRunsSize(sortState) < kMaxMergePending);
const stackSize: Smi = GetPendingRunsSize(sortState);
const pendingRuns: FixedArray = sortState.pendingRuns;
@@ -458,10 +458,10 @@ transitioning builtin
Copy(implicit context: Context)(
source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
length: Smi): JSAny {
- assert(srcPos >= 0);
- assert(dstPos >= 0);
- assert(srcPos <= source.length - length);
- assert(dstPos <= target.length - length);
+ dcheck(srcPos >= 0);
+ dcheck(dstPos >= 0);
+ dcheck(srcPos <= source.length - length);
+ dcheck(dstPos <= target.length - length);
// TODO(szuend): Investigate whether this builtin should be replaced
// by CopyElements/MoveElements for performance.
@@ -498,8 +498,8 @@ Copy(implicit context: Context)(
// On entry, must have low <= start <= high, and that [low, start) is
// already sorted. Pass start == low if you do not know!
macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
- low: Smi, startArg: Smi, high: Smi) {
- assert(low <= startArg && startArg <= high);
+ low: Smi, startArg: Smi, high: Smi): void {
+ dcheck(low <= startArg && startArg <= high);
const workArray = sortState.workArray;
@@ -515,7 +515,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
// Invariants:
// pivot >= all in [low, left).
// pivot < all in [right, start).
- assert(left < right);
+ dcheck(left < right);
// Find pivot insertion point.
while (left < right) {
@@ -529,7 +529,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
left = mid + 1;
}
}
- assert(left == right);
+ dcheck(left == right);
// The invariants still hold, so:
// pivot >= all in [low, left) and
@@ -564,7 +564,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
// returned length is always an ascending sequence.
macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
lowArg: Smi, high: Smi): Smi {
- assert(lowArg < high);
+ dcheck(lowArg < high);
const workArray = sortState.workArray;
@@ -604,7 +604,7 @@ macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
return runLength;
}
-macro ReverseRange(array: FixedArray, from: Smi, to: Smi) {
+macro ReverseRange(array: FixedArray, from: Smi, to: Smi): void {
let low: Smi = from;
let high: Smi = to - 1;
@@ -624,9 +624,9 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// We are only allowed to either merge the two top-most runs, or leave
// the top most run alone and merge the two next runs.
- assert(stackSize >= 2);
- assert(i >= 0);
- assert(i == stackSize - 2 || i == stackSize - 3);
+ dcheck(stackSize >= 2);
+ dcheck(i >= 0);
+ dcheck(i == stackSize - 2 || i == stackSize - 3);
const workArray = sortState.workArray;
@@ -635,8 +635,8 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
const baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
- assert(lengthA > 0 && lengthB > 0);
- assert(baseA + lengthA == baseB);
+ dcheck(lengthA > 0 && lengthB > 0);
+ dcheck(baseA + lengthA == baseB);
// Record the length of the combined runs; if i is the 3rd-last run now,
// also slide over the last run (which isn't involved in this merge).
@@ -654,18 +654,18 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// because they are already in place.
const keyRight = UnsafeCast<JSAny>(workArray.objects[baseB]);
const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
- assert(k >= 0);
+ dcheck(k >= 0);
baseA = baseA + k;
lengthA = lengthA - k;
if (lengthA == 0) return kSuccess;
- assert(lengthA > 0);
+ dcheck(lengthA > 0);
// Where does a end in b? Elements in b after that can be ignored,
// because they are already in place.
const keyLeft = UnsafeCast<JSAny>(workArray.objects[baseA + lengthA - 1]);
lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
- assert(lengthB >= 0);
+ dcheck(lengthB >= 0);
if (lengthB == 0) return kSuccess;
// Merge what remains of the runs, using a temp array with
@@ -698,8 +698,8 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// is plus infinity. In other words, key belongs at index base + k.
builtin GallopLeft(implicit context: Context, sortState: SortState)(
array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
+ dcheck(length > 0 && base >= 0);
+ dcheck(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
@@ -736,7 +736,7 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
} else {
// key <= a[base + hint]: gallop left, until
// a[base + hint - offset] < key <= a[base + hint - lastOfs].
- assert(order >= 0);
+ dcheck(order >= 0);
// a[base + hint] is lowest.
const maxOfs: Smi = hint + 1;
@@ -762,7 +762,7 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
offset = hint - tmp;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ dcheck(-1 <= lastOfs && lastOfs < offset && offset <= length);
// Now a[base+lastOfs] < key <= a[base+offset], so key belongs
// somewhere to the right of lastOfs but no farther right than offset.
@@ -781,8 +781,8 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
}
}
// so a[base + offset - 1] < key <= a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
+ dcheck(lastOfs == offset);
+ dcheck(0 <= offset && offset <= length);
return offset;
}
@@ -797,8 +797,8 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
// or kFailure on error.
builtin GallopRight(implicit context: Context, sortState: SortState)(
array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
+ dcheck(length > 0 && base >= 0);
+ dcheck(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
@@ -859,7 +859,7 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
lastOfs = lastOfs + hint;
offset = offset + hint;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ dcheck(-1 <= lastOfs && lastOfs < offset && offset <= length);
// Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
// somewhere to the right of lastOfs but no farther right than ofs.
@@ -878,8 +878,8 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
}
}
// so a[base + offset - 1] <= key < a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
+ dcheck(lastOfs == offset);
+ dcheck(0 <= offset && offset <= length);
return offset;
}
@@ -890,10 +890,10 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
// that array[baseA + lengthA - 1] belongs at the end of the merge,
// and should have lengthA <= lengthB.
transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
- baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi): void {
+ dcheck(0 < lengthAArg && 0 < lengthBArg);
+ dcheck(0 <= baseA && 0 < baseB);
+ dcheck(baseA + lengthAArg == baseB);
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
@@ -924,7 +924,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(lengthA > 1 && lengthB > 0);
+ dcheck(lengthA > 1 && lengthB > 0);
const order = sortState.Compare(
UnsafeCast<JSAny>(workArray.objects[cursorB]),
@@ -959,7 +959,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
firstIteration) {
firstIteration = false;
- assert(lengthA > 1 && lengthB > 0);
+ dcheck(lengthA > 1 && lengthB > 0);
minGallop = SmiMax(1, minGallop - 1);
sortState.minGallop = minGallop;
@@ -967,7 +967,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
nofWinsA = GallopRight(
tempArray, UnsafeCast<JSAny>(workArray.objects[cursorB]),
cursorTemp, lengthA, 0);
- assert(nofWinsA >= 0);
+ dcheck(nofWinsA >= 0);
if (nofWinsA > 0) {
Copy(tempArray, cursorTemp, workArray, dest, nofWinsA);
@@ -987,7 +987,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
nofWinsB = GallopLeft(
workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
cursorB, lengthB, 0);
- assert(nofWinsB >= 0);
+ dcheck(nofWinsB >= 0);
if (nofWinsB > 0) {
Copy(workArray, cursorB, workArray, dest, nofWinsB);
@@ -1008,7 +1008,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
Copy(tempArray, cursorTemp, workArray, dest, lengthA);
}
} label CopyB {
- assert(lengthA == 1 && lengthB > 0);
+ dcheck(lengthA == 1 && lengthB > 0);
// The last element of run A belongs at the end of the merge.
Copy(workArray, cursorB, workArray, dest, lengthB);
workArray.objects[dest + lengthB] = tempArray.objects[cursorTemp];
@@ -1020,10 +1020,10 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
// be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
// end of the merge and should have lengthA >= lengthB.
transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
- baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi): void {
+ dcheck(0 < lengthAArg && 0 < lengthBArg);
+ dcheck(0 <= baseA && 0 < baseB);
+ dcheck(baseA + lengthAArg == baseB);
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
@@ -1055,7 +1055,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(lengthA > 0 && lengthB > 1);
+ dcheck(lengthA > 0 && lengthB > 1);
const order = sortState.Compare(
UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
@@ -1091,7 +1091,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
firstIteration) {
firstIteration = false;
- assert(lengthA > 0 && lengthB > 1);
+ dcheck(lengthA > 0 && lengthB > 1);
minGallop = SmiMax(1, minGallop - 1);
sortState.minGallop = minGallop;
@@ -1099,7 +1099,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
let k: Smi = GallopRight(
workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]), baseA,
lengthA, lengthA - 1);
- assert(k >= 0);
+ dcheck(k >= 0);
nofWinsA = lengthA - k;
if (nofWinsA > 0) {
@@ -1116,7 +1116,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
k = GallopLeft(
tempArray, UnsafeCast<JSAny>(workArray.objects[cursorA]), 0,
lengthB, lengthB - 1);
- assert(k >= 0);
+ dcheck(k >= 0);
nofWinsB = lengthB - k;
if (nofWinsB > 0) {
@@ -1139,11 +1139,11 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
}
} label Succeed {
if (lengthB > 0) {
- assert(lengthA == 0);
+ dcheck(lengthA == 0);
Copy(tempArray, 0, workArray, dest - (lengthB - 1), lengthB);
}
} label CopyA {
- assert(lengthB == 1 && lengthA > 0);
+ dcheck(lengthB == 1 && lengthA > 0);
// The first element of run B belongs at the front of the merge.
dest = dest - lengthA;
@@ -1166,14 +1166,14 @@ macro ComputeMinRunLength(nArg: Smi): Smi {
let n: Smi = nArg;
let r: Smi = 0; // Becomes 1 if any 1 bits are shifted off.
- assert(n >= 0);
+ dcheck(n >= 0);
while (n >= 64) {
r = r | (n & 1);
n = n >> 1;
}
const minRunLength: Smi = n + r;
- assert(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
+ dcheck(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
return minRunLength;
}
@@ -1198,7 +1198,8 @@ macro RunInvariantEstablished(implicit context: Context)(
// TODO(szuend): Remove unnecessary loads. This macro was refactored to
// improve readability, introducing unnecessary loads in the
// process. Determine if all these extra loads are ok.
-transitioning macro MergeCollapse(context: Context, sortState: SortState) {
+transitioning macro MergeCollapse(
+ context: Context, sortState: SortState): void {
const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
@@ -1226,7 +1227,7 @@ transitioning macro MergeCollapse(context: Context, sortState: SortState) {
// Regardless of invariants, merge all runs on the stack until only one
// remains. This is used at the end of the mergesort.
transitioning macro
-MergeForceCollapse(context: Context, sortState: SortState) {
+MergeForceCollapse(context: Context, sortState: SortState): void {
const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
@@ -1243,7 +1244,7 @@ MergeForceCollapse(context: Context, sortState: SortState) {
}
transitioning macro
-ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
+ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi): void {
if (length < 2) return;
let remaining: Smi = length;
@@ -1272,8 +1273,8 @@ ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
}
MergeForceCollapse(context, sortState);
- assert(GetPendingRunsSize(sortState) == 1);
- assert(GetPendingRunLength(sortState.pendingRuns, 0) == length);
+ dcheck(GetPendingRunsSize(sortState) == 1);
+ dcheck(GetPendingRunLength(sortState.pendingRuns, 0) == length);
}
transitioning macro
@@ -1290,7 +1291,7 @@ CompactReceiverElementsIntoWorkArray(
// TODO(szuend): Implement full range sorting, not only up to MaxSmi.
// https://crbug.com/v8/7970.
const receiverLength: Number = sortState.initialReceiverLength;
- assert(IsNumberNormalized(receiverLength));
+ dcheck(IsNumberNormalized(receiverLength));
const sortLength: Smi = TaggedIsSmi(receiverLength) ?
UnsafeCast<Smi>(receiverLength) :
@@ -1322,12 +1323,12 @@ CompactReceiverElementsIntoWorkArray(
transitioning macro
CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
- numberOfNonUndefined: Smi) {
+ numberOfNonUndefined: Smi): void {
const storeFn = sortState.storeFn;
const workArray = sortState.workArray;
- assert(numberOfNonUndefined <= workArray.length);
- assert(
+ dcheck(numberOfNonUndefined <= workArray.length);
+ dcheck(
numberOfNonUndefined + sortState.numberOfUndefined <=
sortState.sortLength);
diff --git a/chromium/v8/tools/OWNERS b/chromium/v8/tools/OWNERS
index 89ee345b007..c04255f03e5 100644
--- a/chromium/v8/tools/OWNERS
+++ b/chromium/v8/tools/OWNERS
@@ -1,2 +1,2 @@
file:../COMMON_OWNERS
-
+file:../INFRA_OWNERS
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/build_db.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/build_db.js
index 675a322c64d..c00d286eb1f 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/build_db.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/build_db.js
@@ -34,7 +34,6 @@ function main() {
}
const mutateDb = new db.MutateDbWriter(program.output_dir);
- const expressions = new Set();
const inputDir = path.resolve(program.input_dir);
for (const corpusName of program.args) {
@@ -53,7 +52,7 @@ function main() {
}
try{
- mutateDb.process(source, expressions);
+ mutateDb.process(source);
} catch (e) {
console.log(e);
}
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/db.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/db.js
index e96265b0681..3fbe4380235 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/db.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/db.js
@@ -11,11 +11,13 @@ const fs = require('fs');
const fsPath = require('path');
const babelGenerator = require('@babel/generator').default;
+const babelTemplate = require('@babel/template').default;
const babelTraverse = require('@babel/traverse').default;
const babelTypes = require('@babel/types');
const globals = require('globals');
const random = require('./random.js');
+const sourceHelpers = require('./source_helpers.js');
const globalIdentifiers = new Set(Object.keys(globals.builtin));
const propertyNames = new Set([
@@ -238,6 +240,29 @@ function _markSkipped(path) {
}
}
+/**
+ * Returns true if an expression can be applied or false otherwise.
+ */
+function isValid(expression) {
+ const expressionTemplate = babelTemplate(
+ expression.source,
+ sourceHelpers.BABYLON_REPLACE_VAR_OPTIONS);
+
+ const dependencies = {};
+ if (expression.dependencies) {
+ for (const dependency of expression.dependencies) {
+ dependencies[dependency] = babelTypes.identifier('__v_0');
+ }
+ }
+
+ try {
+ expressionTemplate(dependencies);
+ } catch (e) {
+ return false;
+ }
+ return true;
+}
+
class MutateDbWriter {
constructor(outputDir) {
this.seen = new Set();
@@ -393,6 +418,11 @@ class MutateDbWriter {
return;
}
+ // Test results.
+ if (!isValid(expression)) {
+ return;
+ }
+
// Write results.
let dirPath = fsPath.join(self.outputDir, expression.type);
if (!fs.existsSync(dirPath)) {
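
The new isValid() helper instantiates each candidate expression template once, binding every declared dependency to a dummy identifier, and the writer now skips expressions whose templates cannot be built, so unusable snippets never reach the mutation database. A loose Python analogy of that validate-by-trial-instantiation step (string.Template stands in for babelTemplate; the helper name and API are illustrative, not the fuzzer's):

    import string

    def is_valid_template(source, dependencies=()):
        # Try to fill every placeholder with a dummy identifier and reject
        # the template if substitution fails.
        template = string.Template(source)
        placeholders = {name: "__v_0" for name in dependencies}
        try:
            template.substitute(placeholders)
        except (KeyError, ValueError):
            return False
        return True

    # e.g. is_valid_template("foo($x)", ["x"]) -> True
    #      is_valid_template("foo($x)")        -> False (unbound placeholder)
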
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/exceptions.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
index efb1a8a6499..4a571d5dd0d 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
@@ -144,24 +144,6 @@ const DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS = [
'--validate-asm',
];
-const ALLOWED_RUNTIME_FUNCTIONS = new Set([
- // List of allowed runtime functions. Others will be replaced with no-ops.
- 'ArrayBufferDetach',
- 'CompileBaseline',
- 'DeoptimizeFunction',
- 'DeoptimizeNow',
- 'EnableCodeLoggingForTesting',
- 'GetUndetectable',
- 'HeapObjectVerify',
- 'IsBeingInterpreted',
- 'NeverOptimizeFunction',
- 'OptimizeFunctionOnNextCall',
- 'OptimizeOsr',
- 'PrepareFunctionForOptimization',
- 'SetAllocationTimeout',
- 'SimulateNewspaceFull',
-]);
-
const MAX_FILE_SIZE_BYTES = 128 * 1024; // 128KB
const MEDIUM_FILE_SIZE_BYTES = 32 * 1024; // 32KB
@@ -260,13 +242,6 @@ function filterDifferentialFuzzFlags(flags) {
flag => _doesntMatch(DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS, flag));
}
-function isAllowedRuntimeFunction(name) {
- if (process.env.APP_NAME != 'd8') {
- return false;
- }
-
- return ALLOWED_RUNTIME_FUNCTIONS.has(name);
-}
module.exports = {
filterDifferentialFuzzFlags: filterDifferentialFuzzFlags,
@@ -274,7 +249,6 @@ module.exports = {
getGeneratedSoftSkipped: getGeneratedSoftSkipped,
getGeneratedSloppy: getGeneratedSloppy,
getSoftSkipped: getSoftSkipped,
- isAllowedRuntimeFunction: isAllowedRuntimeFunction,
isTestSkippedAbs: isTestSkippedAbs,
isTestSkippedRel: isTestSkippedRel,
isTestSoftSkippedAbs: isTestSoftSkippedAbs,
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
index 7e3c4955ce5..491501dc5c6 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
@@ -36,12 +36,9 @@ class CrossOverMutator extends mutator.Mutator {
{canHaveSuper: canHaveSuper});
// Insert the statement.
- var templateOptions = Object.assign({}, sourceHelpers.BABYLON_OPTIONS);
- templateOptions['placeholderPattern'] = /^VAR_[0-9]+$/;
-
let toInsert = babelTemplate(
randomExpression.source,
- templateOptions);
+ sourceHelpers.BABYLON_REPLACE_VAR_OPTIONS);
const dependencies = {};
if (randomExpression.dependencies) {
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
index 264734607af..d7cb142f812 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
@@ -46,6 +46,9 @@ const BABYLON_OPTIONS = {
],
}
+const BABYLON_REPLACE_VAR_OPTIONS = Object.assign({}, BABYLON_OPTIONS);
+BABYLON_REPLACE_VAR_OPTIONS['placeholderPattern'] = /^VAR_[0-9]+$/;
+
function _isV8OrSpiderMonkeyLoad(path) {
// 'load' and 'loadRelativeToScript' used by V8 and SpiderMonkey.
return (babelTypes.isIdentifier(path.node.callee) &&
@@ -323,7 +326,6 @@ function loadSource(baseDir, relPath, parseStrict=false) {
removeComments(ast);
cleanAsserts(ast);
- neuterDisallowedV8Natives(ast);
annotateWithOriginalPath(ast, relPath);
const flags = loadFlags(data);
@@ -373,28 +375,6 @@ function cleanAsserts(ast) {
}
/**
- * Filter out disallowed V8 runtime functions.
- */
-function neuterDisallowedV8Natives(ast) {
- babelTraverse(ast, {
- CallExpression(path) {
- if (!babelTypes.isIdentifier(path.node.callee) ||
- !path.node.callee.name.startsWith(V8_BUILTIN_PREFIX)) {
- return;
- }
-
- const functionName = path.node.callee.name.substr(
- V8_BUILTIN_PREFIX.length);
-
- if (!exceptions.isAllowedRuntimeFunction(functionName)) {
- path.replaceWith(babelTypes.callExpression(
- babelTypes.identifier('nop'), []));
- }
- }
- });
-}
-
-/**
* Annotate code with original file path.
*/
function annotateWithOriginalPath(ast, relPath) {
@@ -468,6 +448,7 @@ function generateCode(source, dependencies=[]) {
module.exports = {
BABYLON_OPTIONS: BABYLON_OPTIONS,
+ BABYLON_REPLACE_VAR_OPTIONS: BABYLON_REPLACE_VAR_OPTIONS,
generateCode: generateCode,
loadDependencyAbs: loadDependencyAbs,
loadResource: loadResource,
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js
index f16fb2fe533..f16fb2fe533 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js
index 115616da0d9..115616da0d9 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
index dfa4bc49bae..342c9d87a34 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
@@ -3,5 +3,5 @@
// found in the LICENSE file.
var testLoad = 'test_load';
-d8.file.execute('test_data/mjsunit/test_load_1.js');
-d8.file.execute('test_load_0.js');
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_0.js');
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
index 3959a126b4f..d0e66e4a9fd 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute('test_data/mjsunit/test_load_1.js');
-d8.file.execute('test_load_2.js');
-d8.file.execute('test_load_3.js');
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_2.js');
+load('test_load_3.js');
var testLoad0 = 'test_load_0';
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
index 8328dd24689..03c91669751 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute('test_load_2.js');
+load('test_load_2.js');
var testLoad1 = 'test_load_1';
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
index cd2dfb5c04f..31a9f4c507b 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute("test_load_self.js");
+load("test_load_self.js");
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js
deleted file mode 100644
index 3d7ed65c78a..00000000000
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let x = 2;
-let y = 2;
-Math.pow(x, y);
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
deleted file mode 100644
index fce0782617b..00000000000
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-let x, y;
-(function([ x = y = 1 ]) {}([]));
diff --git a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_db.js b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_db.js
index ff13c383c50..35f7956b764 100644
--- a/chromium/v8/tools/clusterfuzz/js_fuzzer/test_db.js
+++ b/chromium/v8/tools/clusterfuzz/js_fuzzer/test_db.js
@@ -29,7 +29,6 @@ function main() {
return;
}
- const loader = new sourceHelpers.V8SourceLoader();
const mutateDb = new db.MutateDb(program.input_dir);
const mutator = new crossOverMutator.CrossOverMutator(
{ MUTATE_CROSSOVER_INSERT: 1.0, testing: true }, mutateDb);
@@ -47,9 +46,9 @@ function main() {
() => { return expression; });
// Use a source that will try to insert one statement, allowing
// super.
- const source = loader.load(
+ const source = sourceHelpers.loadSource(
__dirname,
- 'test_data/regress/build_db/cross_over_mutator_class_input.js');
+ 'test_data/cross_over_mutator_class_input.js');
try {
mutator.mutate(source);
nPass++;
diff --git a/chromium/v8/tools/clusterfuzz/v8_commands.py b/chromium/v8/tools/clusterfuzz/v8_commands.py
index 924acbedd96..f03161c2c48 100644
--- a/chromium/v8/tools/clusterfuzz/v8_commands.py
+++ b/chromium/v8/tools/clusterfuzz/v8_commands.py
@@ -110,8 +110,7 @@ class Output(object):
self.pid = pid
def HasCrashed(self):
- return (self.exit_code < 0 and
- self.exit_code != -signal.SIGABRT)
+ return self.exit_code < 0
def Execute(args, cwd, timeout=None):
diff --git a/chromium/v8/tools/clusterfuzz/v8_foozzie.py b/chromium/v8/tools/clusterfuzz/v8_foozzie.py
index 52b79540939..92f881df839 100755
--- a/chromium/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/chromium/v8/tools/clusterfuzz/v8_foozzie.py
@@ -78,13 +78,6 @@ CONFIGS = dict(
'--always-opt',
'--force-slow-path',
],
- trusted=[
- '--no-untrusted-code-mitigations',
- ],
- trusted_opt=[
- '--always-opt',
- '--no-untrusted-code-mitigations',
- ],
)
BASELINE_CONFIG = 'ignition'
@@ -173,6 +166,15 @@ KNOWN_FAILURES = {
'CrashTests/5694376231632896/1033966.js': 'flaky',
}
+# Flags that are already crashy during smoke tests should not be used.
+DISALLOWED_FLAGS = [
+ '--gdbjit',
+]
+
+
+def filter_flags(flags):
+ return [flag for flag in flags if flag not in DISALLOWED_FLAGS]
+
def infer_arch(d8):
"""Infer the V8 architecture from the build configuration next to the
@@ -223,7 +225,7 @@ class ExecutionArgumentsConfig(object):
d8 = os.path.join(BASE_PATH, d8)
assert os.path.exists(d8)
- flags = CONFIGS[config] + get('config_extra_flags')
+ flags = CONFIGS[config] + filter_flags(get('config_extra_flags'))
RunOptions = namedtuple('RunOptions', ['arch', 'config', 'd8', 'flags'])
return RunOptions(infer_arch(d8), config, d8, flags)
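
Besides dropping the untrusted-code-mitigation configs, extra per-config flags are now filtered against a small deny list of flags that already crash during smoke tests before the run options are assembled. A short usage sketch of that filter (values illustrative):

    DISALLOWED_FLAGS = ['--gdbjit']

    def filter_flags(flags):
        # Keep only flags that are not on the smoke-test deny list.
        return [flag for flag in flags if flag not in DISALLOWED_FLAGS]

    assert filter_flags(['--gdbjit', '--no-lazy']) == ['--no-lazy']
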
diff --git a/chromium/v8/tools/clusterfuzz/v8_smoke_tests.js b/chromium/v8/tools/clusterfuzz/v8_smoke_tests.js
index 39eb2d4e219..2c5fab338d3 100644
--- a/chromium/v8/tools/clusterfuzz/v8_smoke_tests.js
+++ b/chromium/v8/tools/clusterfuzz/v8_smoke_tests.js
@@ -40,6 +40,6 @@ print("Sensitive runtime functions are neutered");
%OptimizeFunctionOnNextCall(foo);
foo();
print(%GetOptimizationStatus(foo));
- const fun = new Function("f", "sync", "return %GetOptimizationStatus(f);");
+ const fun = new Function("f", "return %GetOptimizationStatus(f);");
print(fun(foo));
})();
diff --git a/chromium/v8/tools/cppgc/gen_cmake.py b/chromium/v8/tools/cppgc/gen_cmake.py
index 0375d0fd3bd..b4a805c07c4 100755
--- a/chromium/v8/tools/cppgc/gen_cmake.py
+++ b/chromium/v8/tools/cppgc/gen_cmake.py
@@ -244,8 +244,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purposes" OFF)
option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF)
-option(CPPGC_ENABLE_VERIFY_LIVE_BYTES " Enable verification of live bytes in the marking verifier" OFF)
-option(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS " Enable assignment checks for Members/Persistents during prefinalizer invocations" OFF)
+option(CPPGC_ENABLE_VERIFY_HEAP "Enables additional heap verification phases and checks" OFF)
option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF)
set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el")
@@ -409,7 +408,7 @@ else{else_cond}
deps=['Threads::Threads'],
desc='Main library'),
'sample':
- Target(name='cppgc_sample',
+ Target(name='cppgc_hello_world',
cmake='add_executable',
deps=['cppgc'],
desc='Example'),
@@ -435,11 +434,8 @@ endif()
if(CPPGC_ENABLE_CAGED_HEAP)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_CAGED_HEAP")
endif()
-if(CPPGC_ENABLE_VERIFY_LIVE_BYTES)
- target_compile_definitions({target.name} PRIVATE "-DCPPGC_VERIFY_LIVE_BYTES")
-endif()
-if(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS)
- target_compile_definitions({target.name} PRIVATE "-DCPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS")
+if(CPPGC_ENABLE_VERIFY_HEAP)
+ target_compile_definitions({target.name} PRIVATE "-DCPPGC_ENABLE_VERIFY_HEAP")
endif()
if(CPPGC_ENABLE_YOUNG_GENERATION)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_YOUNG_GENERATION")
diff --git a/chromium/v8/tools/cppgc/test_cmake.sh b/chromium/v8/tools/cppgc/test_cmake.sh
index 77f551c0b57..55765ddcdd3 100755
--- a/chromium/v8/tools/cppgc/test_cmake.sh
+++ b/chromium/v8/tools/cppgc/test_cmake.sh
@@ -50,7 +50,7 @@ cmake -GNinja $rootdir || fail "Failed to execute cmake"
# Build all targets.
ninja cppgc || fail "Failed to build cppgc"
-ninja cppgc_sample || fail "Failed to build sample"
+ninja cppgc_hello_world || fail "Failed to build sample"
ninja cppgc_unittests || fail "Failed to build unittests"
# Run unittests.
diff --git a/chromium/v8/tools/dev/gm.py b/chromium/v8/tools/dev/gm.py
index 3d52b70cdf4..613065d5b11 100755
--- a/chromium/v8/tools/dev/gm.py
+++ b/chromium/v8/tools/dev/gm.py
@@ -28,6 +28,7 @@ not contain spaces.
from __future__ import print_function
import errno
import os
+import platform
import re
import subprocess
import sys
@@ -42,7 +43,7 @@ BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x", "android_arm", "android_arm64"]
+ "riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
@@ -250,9 +251,7 @@ def _Notify(summary, body):
print("{} - {}".format(summary, body))
def _GetMachine():
- # Once we migrate to Python3, this can use os.uname().machine.
- # The index-based access is compatible with all Python versions.
- return os.uname()[4]
+ return platform.machine()
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
@@ -299,6 +298,10 @@ class Config(object):
cpu = "arm64"
elif self.arch == "arm" and _GetMachine() in ("aarch64", "arm64"):
cpu = "arm"
+ elif self.arch == "loong64" and _GetMachine() == "loongarch64":
+ cpu = "loong64"
+ elif self.arch == "mips64el" and _GetMachine() == "mips64":
+ cpu = "mips64el"
elif "64" in self.arch or self.arch == "s390x":
# Native x64 or simulator build.
cpu = "x64"
@@ -310,7 +313,7 @@ class Config(object):
elif self.arch == "android_arm64":
v8_cpu = "arm64"
elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x"):
+ "riscv64", "s390", "s390x", "loong64"):
v8_cpu = self.arch
else:
return []
@@ -322,9 +325,9 @@ class Config(object):
return []
def GetSpecialCompiler(self):
- if _GetMachine() == "aarch64":
- # We have no prebuilt Clang for arm64 on Linux, so use the system Clang
- # instead.
+ if _GetMachine() in ("aarch64", "mips64", "loongarch64"):
+ # We have no prebuilt Clang for arm64, mips64 or loongarch64 on Linux,
+ # so use the system Clang instead.
return ["clang_base_path = \"/usr\"", "clang_use_chrome_plugins = false"]
return []
@@ -363,7 +366,7 @@ class Config(object):
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
extra_opt = match.group(1) if match else ""
- cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
+ cmdline = re.compile("python3 ../../tools/run.py ./mksnapshot (.*)")
orig_cmdline = cmdline.search(output).group(1).strip()
cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
_Notify("V8 build requires your attention",
@@ -503,7 +506,7 @@ def Main(argv):
return_code = 0
# If we have Goma but it is not running, start it.
if (IS_GOMA_MACHINE and
- _Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
+ _Call("pgrep -x compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % GOMADIR)
for c in configs:
return_code += configs[c].Build()
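
gm.py now reads the host machine via platform.machine(), which works on both Python 2 and 3, and uses it together with the requested arch to pick the GN target cpu, so loong64 and mips64el builds on native hosts no longer fall back to the x64 simulator. A condensed sketch of that mapping (illustrative, not gm.py's exact control flow):

    import platform

    def gn_target_cpu(arch):
        machine = platform.machine()
        if arch == "arm" and machine in ("aarch64", "arm64"):
            return "arm"        # 32-bit arm build on a 64-bit arm host
        if arch == "loong64" and machine == "loongarch64":
            return "loong64"    # native loong64 build
        if arch == "mips64el" and machine == "mips64":
            return "mips64el"   # native mips64el build
        # Other 64-bit arches end up as native x64 or simulator builds.
        return "x64"
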
diff --git a/chromium/v8/tools/gen-postmortem-metadata.py b/chromium/v8/tools/gen-postmortem-metadata.py
index 7b3dcedc923..bad1481d1c3 100644
--- a/chromium/v8/tools/gen-postmortem-metadata.py
+++ b/chromium/v8/tools/gen-postmortem-metadata.py
@@ -91,6 +91,16 @@ consts_misc = [
{ 'name': 'TaggedSize', 'value': 'kTaggedSize' },
{ 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
+ { 'name': 'CodeKindFieldMask', 'value': 'Code::KindField::kMask' },
+ { 'name': 'CodeKindFieldShift', 'value': 'Code::KindField::kShift' },
+
+ { 'name': 'CodeKindBytecodeHandler',
+ 'value': 'static_cast<int>(CodeKind::BYTECODE_HANDLER)' },
+ { 'name': 'CodeKindInterpretedFunction',
+ 'value': 'static_cast<int>(CodeKind::INTERPRETED_FUNCTION)' },
+ { 'name': 'CodeKindBaseline',
+ 'value': 'static_cast<int>(CodeKind::BASELINE)' },
+
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
@@ -118,9 +128,9 @@ consts_misc = [
{ 'name': 'prop_kind_mask',
'value': 'PropertyDetails::KindField::kMask' },
{ 'name': 'prop_location_Descriptor',
- 'value': 'kDescriptor' },
+ 'value': 'static_cast<int>(PropertyLocation::kDescriptor)' },
{ 'name': 'prop_location_Field',
- 'value': 'kField' },
+ 'value': 'static_cast<int>(PropertyLocation::kField)' },
{ 'name': 'prop_location_mask',
'value': 'PropertyDetails::LocationField::kMask' },
{ 'name': 'prop_location_shift',
@@ -189,6 +199,10 @@ consts_misc = [
'value': 'StandardFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp' },
+ { 'name': 'off_fp_bytecode_array',
+ 'value': 'UnoptimizedFrameConstants::kBytecodeArrayFromFp' },
+ { 'name': 'off_fp_bytecode_offset',
+ 'value': 'UnoptimizedFrameConstants::kBytecodeOffsetOrFeedbackVectorFromFp' },
{ 'name': 'scopeinfo_idx_nparams',
'value': 'ScopeInfo::kParameterCount' },
@@ -250,6 +264,7 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
+ 'BytecodeArray, data, uintptr_t, kHeaderSize',
'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
@@ -273,6 +288,7 @@ extras_accessors = [
'UncompiledData, inferred_name, String, kInferredNameOffset',
'UncompiledData, start_position, int32_t, kStartPositionOffset',
'UncompiledData, end_position, int32_t, kEndPositionOffset',
+ 'Script, source, Object, kSourceOffset',
'Script, name, Object, kNameOffset',
'Script, line_ends, Object, kLineEndsOffset',
'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
@@ -280,6 +296,7 @@ extras_accessors = [
'SharedFunctionInfo, flags, int, kFlagsOffset',
'SharedFunctionInfo, length, uint16_t, kLengthOffset',
'SlicedString, parent, String, kParentOffset',
+ 'Code, flags, uint32_t, kFlagsOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
'String, length, int32_t, kLengthOffset',
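
The added postmortem constants expose the mask and shift of the Code kind bitfield plus the integer values of the bytecode-handler, interpreted-function and baseline code kinds, so an out-of-process debugger can classify a Code object from its raw flags word. A hypothetical consumer-side decode using those exported names (the function itself is illustrative; only the constant names come from this script):

    def decode_code_kind(flags, kind_mask, kind_shift):
        # Standard bitfield extraction: mask out the kind bits, then shift
        # them down to the integer CodeKind value.
        return (flags & kind_mask) >> kind_shift

    # e.g. decode_code_kind(code_flags, CodeKindFieldMask, CodeKindFieldShift)
    #      == CodeKindBaseline marks baseline-compiled code.
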
diff --git a/chromium/v8/tools/generate-header-include-checks.py b/chromium/v8/tools/generate-header-include-checks.py
index 250b7410680..42c118c9d5e 100755
--- a/chromium/v8/tools/generate-header-include-checks.py
+++ b/chromium/v8/tools/generate-header-include-checks.py
@@ -23,7 +23,7 @@ import re
import sys
# TODO(clemensb): Extend to tests.
-DEFAULT_INPUT = ['base', 'src']
+DEFAULT_INPUT = ['base', 'include', 'src']
DEFAULT_GN_FILE = 'BUILD.gn'
MY_DIR = os.path.dirname(os.path.realpath(__file__))
V8_DIR = os.path.dirname(MY_DIR)
@@ -44,7 +44,7 @@ AUTO_EXCLUDE_PATTERNS = [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
- 'ppc','riscv64')]
+ 'ppc', 'riscv64', 'loong64')]
args = None
def parse_args():
diff --git a/chromium/v8/tools/mb/mb.py b/chromium/v8/tools/mb/mb.py
index 42ed60c7ef6..408e2b566a0 100755
--- a/chromium/v8/tools/mb/mb.py
+++ b/chromium/v8/tools/mb/mb.py
@@ -242,8 +242,6 @@ class MetaBuildWrapper(object):
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
- subp.add_argument('-s', '--swarmed', action='store_true',
- help='Run under swarming with the default dimensions')
subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
@@ -375,67 +373,7 @@ class MetaBuildWrapper(object):
if ret:
return ret
- if self.args.swarmed:
- return self._RunUnderSwarming(build_dir, target)
- else:
- return self._RunLocallyIsolated(build_dir, target)
-
- def _RunUnderSwarming(self, build_dir, target):
- # TODO(dpranke): Look up the information for the target in
- # the //testing/buildbot.json file, if possible, so that we
- # can determine the isolate target, command line, and additional
- # swarming parameters, if possible.
- #
- # TODO(dpranke): Also, add support for sharding and merging results.
- # TODO(liviurau): While this seems to not be used in V8 yet, we need to add
- # a switch for internal try-bots, since they need to use 'chrome-swarming'
- cas_instance = 'chromium-swarm'
- dimensions = []
- for k, v in self._DefaultDimensions() + self.args.dimensions:
- dimensions += ['-d', k, v]
-
- archive_json_path = self.ToSrcRelPath(
- '%s/%s.archive.json' % (build_dir, target))
- cmd = [
- self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
- self.isolate_exe),
- 'archive',
- '-i',
- self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
- '-cas-instance', cas_instance,
- '-dump-json',
- archive_json_path,
- ]
- ret, _, _ = self.Run(cmd, force_verbose=False)
- if ret:
- return ret
-
- try:
- archive_hashes = json.loads(self.ReadFile(archive_json_path))
- except Exception:
- self.Print(
- 'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
- return 1
- try:
- cas_digest = archive_hashes[target]
- except Exception:
- self.Print(
- 'Cannot find hash for "%s" in "%s", file content: %s' %
- (target, archive_json_path, archive_hashes),
- file=sys.stderr)
- return 1
-
- cmd = [
- self.executable,
- self.PathJoin('tools', 'swarming_client', 'swarming.py'),
- 'run',
- '-digests', cas_digest,
- '-S', 'chromium-swarm.appspot.com',
- ] + dimensions
- if self.args.extra_args:
- cmd += ['--'] + self.args.extra_args
- ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
- return ret
+ return self._RunLocallyIsolated(build_dir, target)
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
@@ -866,8 +804,6 @@ class MetaBuildWrapper(object):
self.WriteJSON(
{
'args': [
- '--isolated',
- self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
diff --git a/chromium/v8/tools/mb/mb_unittest.py b/chromium/v8/tools/mb/mb_unittest.py
index 4c67495de43..86d9cd403b6 100755
--- a/chromium/v8/tools/mb/mb_unittest.py
+++ b/chromium/v8/tools/mb/mb_unittest.py
@@ -523,28 +523,6 @@ class UnitTest(unittest.TestCase):
self.check(['run', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
- def test_run_swarmed(self):
- files = {
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'base_unittests': {"
- " 'label': '//base:base_unittests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/out/Default/base_unittests.runtime_deps': (
- "base_unittests\n"
- ),
- 'out/Default/base_unittests.archive.json':
- ("{\"base_unittests\":\"fake_hash\"}"),
- }
-
- mbw = self.fake_mbw(files=files)
- self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
- 'base_unittests'], mbw=mbw, ret=0)
- self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
- '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
-
def test_lookup(self):
self.check(['lookup', '-c', 'debug_goma'], ret=0,
out=('\n'
diff --git a/chromium/v8/tools/profile.mjs b/chromium/v8/tools/profile.mjs
index 4127b34b07c..526baa835e7 100644
--- a/chromium/v8/tools/profile.mjs
+++ b/chromium/v8/tools/profile.mjs
@@ -116,8 +116,9 @@ export class Script {
sourcePosition = new SourcePosition(this, line, column,)
this._addSourcePosition(line, column, sourcePosition);
}
- if (entry.entry?.type == "Script") {
- // Mark the source position of scripts, for inline scripts which
+ if (this.sourcePosition === undefined && entry.entry?.type === "Script") {
+ // Mark the source position of scripts, for inline scripts which don't
+ // start at line 1.
this.sourcePosition = sourcePosition;
}
sourcePosition.addEntry(entry);
diff --git a/chromium/v8/tools/release/PRESUBMIT.py b/chromium/v8/tools/release/PRESUBMIT.py
index 3bcb26d29fa..a982b2e153a 100644
--- a/chromium/v8/tools/release/PRESUBMIT.py
+++ b/chromium/v8/tools/release/PRESUBMIT.py
@@ -2,7 +2,13 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-def CheckChangeOnCommit(input_api, output_api):
+def _CommonChecks(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', files_to_check=['test_scripts.py$'])
return input_api.RunTests(tests)
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/chromium/v8/tools/release/auto_tag.py b/chromium/v8/tools/release/auto_tag.py
index fddefed61f3..7e77c313d83 100755
--- a/chromium/v8/tools/release/auto_tag.py
+++ b/chromium/v8/tools/release/auto_tag.py
@@ -23,7 +23,7 @@ class Preparation(Step):
self.CommonPrepare()
self.PrepareBranch()
- self.GitCheckout("master")
+ self.GitCheckout("main")
self.vc.Pull()
diff --git a/chromium/v8/tools/release/check_clusterfuzz.py b/chromium/v8/tools/release/check_clusterfuzz.py
index 021cd552867..b1b7e084df0 100755
--- a/chromium/v8/tools/release/check_clusterfuzz.py
+++ b/chromium/v8/tools/release/check_clusterfuzz.py
@@ -28,7 +28,7 @@ import urllib2
# Constants to git repos.
BASE_URL = "https://chromium.googlesource.com"
-DEPS_LOG = BASE_URL + "/chromium/src/+log/master/DEPS?format=JSON"
+DEPS_LOG = BASE_URL + "/chromium/src/+log/main/DEPS?format=JSON"
# Constants for retrieving v8 rolls.
CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
diff --git a/chromium/v8/tools/release/common_includes.py b/chromium/v8/tools/release/common_includes.py
index 5049cc45346..b61a3e2e27f 100644
--- a/chromium/v8/tools/release/common_includes.py
+++ b/chromium/v8/tools/release/common_includes.py
@@ -214,13 +214,13 @@ class VCInterface(object):
def GetBranches(self):
raise NotImplementedError()
- def MasterBranch(self):
+ def MainBranch(self):
raise NotImplementedError()
def CandidateBranch(self):
raise NotImplementedError()
- def RemoteMasterBranch(self):
+ def RemoteMainBranch(self):
raise NotImplementedError()
def RemoteCandidateBranch(self):
@@ -258,14 +258,14 @@ class GitInterface(VCInterface):
# Remove 'branch-heads/' prefix.
return map(lambda s: s[13:], branches)
- def MasterBranch(self):
- return "master"
+ def MainBranch(self):
+ return "main"
def CandidateBranch(self):
return "candidates"
- def RemoteMasterBranch(self):
- return "origin/master"
+ def RemoteMainBranch(self):
+ return "origin/main"
def RemoteCandidateBranch(self):
return "origin/candidates"
@@ -275,7 +275,7 @@ class GitInterface(VCInterface):
# want.
if name.startswith('refs/'):
return name
- if name in ["candidates", "master"]:
+ if name in ["candidates", "main"]:
return "refs/remotes/origin/%s" % name
try:
# Check if branch is in heads.
@@ -474,8 +474,8 @@ class Step(GitRecipesMixin):
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Checkout master in case the script was left on a work branch.
- self.GitCheckout('origin/master')
+ # Checkout main in case the script was left on a work branch.
+ self.GitCheckout('origin/main')
# Fetch unfetched revisions.
self.vc.Fetch()
@@ -485,7 +485,7 @@ class Step(GitRecipesMixin):
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
- self.GitCheckout('origin/master')
+ self.GitCheckout('origin/main')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
@@ -605,13 +605,13 @@ class Step(GitRecipesMixin):
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
- # base on master through the commit message.
+ # base on main through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
- # This is a new-style v8 version branched from master. The commit
+ # This is a new-style v8 version branched from main. The commit
# "latest_hash" is the version-file change. Its parent is the release
- # base on master.
+ # base on main.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
diff --git a/chromium/v8/tools/release/create_release.py b/chromium/v8/tools/release/create_release.py
index 20a666fb830..d1a066f00b6 100755
--- a/chromium/v8/tools/release/create_release.py
+++ b/chromium/v8/tools/release/create_release.py
@@ -19,7 +19,7 @@ class Preparation(Step):
def RunStep(self):
self.Git("fetch origin +refs/heads/*:refs/heads/*")
- self.GitCheckout("origin/master")
+ self.GitCheckout("origin/main")
self.DeleteBranch("work-branch")
@@ -28,7 +28,7 @@ class PrepareBranchRevision(Step):
def RunStep(self):
self["push_hash"] = (self._options.revision or
- self.GitLog(n=1, format="%H", branch="origin/master"))
+ self.GitLog(n=1, format="%H", branch="origin/main"))
assert self["push_hash"]
print("Release revision %s" % self["push_hash"])
@@ -39,16 +39,16 @@ class IncrementVersion(Step):
def RunStep(self):
latest_version = self.GetLatestVersion()
- # The version file on master can be used to bump up major/minor at
+ # The version file on main can be used to bump up major/minor at
# branch time.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
- self.ReadAndPersistVersion("master_")
- master_version = self.ArrayToVersion("master_")
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMainBranch())
+ self.ReadAndPersistVersion("main_")
+ main_version = self.ArrayToVersion("main_")
- # Use the highest version from master or from tags to determine the new
+ # Use the highest version from main or from tags to determine the new
# version.
authoritative_version = sorted(
- [master_version, latest_version], key=SortingKey)[1]
+ [main_version, latest_version], key=SortingKey)[1]
self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
@@ -74,7 +74,7 @@ class DetectLastRelease(Step):
MESSAGE = "Detect commit ID of last release base."
def RunStep(self):
- self["last_push_master"] = self.GetLatestReleaseBase()
+ self["last_push_main"] = self.GetLatestReleaseBase()
class DeleteBranchRef(Step):
@@ -107,7 +107,7 @@ class MakeBranch(Step):
MESSAGE = "Create the branch."
def RunStep(self):
- self.Git("reset --hard origin/master")
+ self.Git("reset --hard origin/main")
self.Git("new-branch work-branch --upstream origin/%s" % self["version"])
self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
@@ -186,7 +186,7 @@ class CleanUp(Step):
print("Congratulations, you have successfully created version %s."
% self["version"])
- self.GitCheckout("origin/master")
+ self.GitCheckout("origin/main")
self.DeleteBranch("work-branch")
self.Git("gc")
diff --git a/chromium/v8/tools/release/list_deprecated.py b/chromium/v8/tools/release/list_deprecated.py
index bc479e16531..e52d80effde 100755
--- a/chromium/v8/tools/release/list_deprecated.py
+++ b/chromium/v8/tools/release/list_deprecated.py
@@ -9,6 +9,8 @@ import re
import subprocess
import sys
from pathlib import Path
+import logging
+from multiprocessing import Pool
RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
@@ -18,132 +20,156 @@ VERSION_CACHE = dict()
RE_VERSION_MAJOR = re.compile(r".*V8_MAJOR_VERSION ([0-9]+)")
RE_VERSION_MINOR = re.compile(r".*V8_MINOR_VERSION ([0-9]+)")
-
-def extract_version(hash):
- if hash in VERSION_CACHE:
- return VERSION_CACHE[hash]
- if hash == '0000000000000000000000000000000000000000':
- return 'HEAD'
- result = subprocess.check_output(
- ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
- major = RE_VERSION_MAJOR.search(result).group(1)
- minor = RE_VERSION_MINOR.search(result).group(1)
- version = f"{major}.{minor}"
- VERSION_CACHE[hash] = version
- return version
-
-
-def get_blame(file_path):
- result = subprocess.check_output(
- ['git', 'blame', '-t', '--line-porcelain', file_path], encoding='UTF-8')
- line_iter = iter(result.splitlines())
- blame_list = list()
- current_blame = None
- while True:
- line = next(line_iter, None)
- if line is None:
- break
- if RE_GITHASH.match(line):
- if current_blame is not None:
- blame_list.append(current_blame)
- hash = line.split(" ")[0]
- current_blame = {
- 'datetime': 0,
- 'filename': None,
- 'content': None,
- 'hash': hash
- }
- continue
- match = RE_AUTHOR_TIME.match(line)
- if match:
- current_blame['datetime'] = datetime.fromtimestamp(int(
- match.groups()[0]))
- continue
- match = RE_FILENAME.match(line)
- if match:
- current_blame['filename'] = match.groups()[0]
- current_blame['content'] = next(line_iter).strip()
- continue
- blame_list.append(current_blame)
- return blame_list
-
-
RE_MACRO_END = re.compile(r"\);")
RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
-def filter_and_print(blame_list, macro, options):
- before = options.before
- index = 0
- re_macro = re.compile(macro)
- deprecated = list()
- while index < len(blame_list):
- blame = blame_list[index]
- commit_datetime = blame['datetime']
- if commit_datetime >= before:
- index += 1
- continue
- line = blame['content']
- commit_hash = blame['hash']
- match = re_macro.search(line)
- if match:
- pos = match.end()
- start = -1
- parens = 0
- while True:
- if pos >= len(line):
- # Extend to next line
- index = index + 1
- blame = blame_list[index]
- line = line + blame['content']
- if line[pos] == '(':
- parens = parens + 1
- elif line[pos] == ')':
- parens = parens - 1
- if parens == 0:
- # Exclude closing ")
- pos = pos - 2
- break
- elif line[pos] == '"' and start == -1:
- start = pos + 1
- pos = pos + 1
- # Extract content and replace double quotes from merged lines
- content = line[start:pos].strip().replace('""', '')
- deprecated.append((index + 1, commit_datetime, commit_hash, content))
- index = index + 1
- print(f"# Marked as {macro}: {len(deprecated)}")
- for linenumber, commit_datetime, commit_hash, content in deprecated:
- commit_date = commit_datetime.date()
- file_position = (
- f"{options.v8_header}:{linenumber}").rjust(len(options.v8_header) + 5)
- print(f" {file_position}\t{commit_date}\t{commit_hash[:8]}"
- f"\t{extract_version(commit_hash)}\t{content}")
- return len(deprecated)
+class HeaderFile(object):
+ def __init__(self, path):
+ self.path = path
+ self.blame_list = self.get_blame_list()
+
+ @classmethod
+ def get_api_header_files(clazz, options):
+ files = subprocess.check_output(
+ ['git', 'ls-tree', '--name-only', '-r', 'HEAD', options.include_dir],
+ encoding='UTF-8')
+ files = filter(lambda l: l.endswith('.h'), files.splitlines())
+ with Pool(processes=24) as pool:
+ return pool.map(HeaderFile, files)
+
+ def extract_version(self, hash):
+ if hash in VERSION_CACHE:
+ return VERSION_CACHE[hash]
+ if hash == '0000000000000000000000000000000000000000':
+ return 'HEAD'
+ result = subprocess.check_output(
+ ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
+ major = RE_VERSION_MAJOR.search(result).group(1)
+ minor = RE_VERSION_MINOR.search(result).group(1)
+ version = f"{major}.{minor}"
+ VERSION_CACHE[hash] = version
+ return version
+
+ def get_blame_list(self):
+ logging.info(f"blame list for {self.path}")
+ result = subprocess.check_output(
+ ['git', 'blame', '-t', '--line-porcelain', self.path],
+ encoding='UTF-8')
+ line_iter = iter(result.splitlines())
+ blame_list = list()
+ current_blame = None
+ while True:
+ line = next(line_iter, None)
+ if line is None:
+ break
+ if RE_GITHASH.match(line):
+ if current_blame is not None:
+ blame_list.append(current_blame)
+ hash = line.split(" ")[0]
+ current_blame = {
+ 'datetime': 0,
+ 'filename': None,
+ 'content': None,
+ 'hash': hash
+ }
+ continue
+ match = RE_AUTHOR_TIME.match(line)
+ if match:
+ current_blame['datetime'] = datetime.fromtimestamp(
+ int(match.groups()[0]))
+ continue
+ match = RE_FILENAME.match(line)
+ if match:
+ current_blame['filename'] = match.groups()[0]
+ current_blame['content'] = next(line_iter).strip()
+ continue
+ blame_list.append(current_blame)
+ return blame_list
+
+ def filter_and_print(self, macro, options):
+ before = options.before
+ index = 0
+ re_macro = re.compile(macro)
+ deprecated = list()
+ while index < len(self.blame_list):
+ blame = self.blame_list[index]
+ line = blame['content']
+ if line.startswith("#") or line.startswith("//"):
+ index += 1
+ continue
+ commit_datetime = blame['datetime']
+ if commit_datetime >= before:
+ index += 1
+ continue
+ commit_hash = blame['hash']
+ match = re_macro.search(line)
+ if match:
+ pos = match.end()
+ start = -1
+ parens = 0
+ while True:
+ if pos >= len(line):
+ # Extend to next line
+ index = index + 1
+ blame = self.blame_list[index]
+ line = line + blame['content']
+ if line[pos] == '(':
+ parens = parens + 1
+ elif line[pos] == ')':
+ parens = parens - 1
+ if parens == 0:
+ # Exclude closing ")
+ pos = pos - 2
+ break
+ elif line[pos] == '"' and start == -1:
+ start = pos + 1
+ pos = pos + 1
+ # Extract content and replace double quotes from merged lines
+ content = line[start:pos].strip().replace('""', '')
+ deprecated.append((index + 1, commit_datetime, commit_hash, content))
+ index = index + 1
+ if len(deprecated) == 0: return
+ for linenumber, commit_datetime, commit_hash, content in deprecated:
+ commit_date = commit_datetime.date()
+ file_position = (f"{self.path}:{linenumber}").rjust(40)
+ print(f" {file_position}\t{commit_date}\t{commit_hash[:8]}"
+ f"\t{self.extract_version(commit_hash)}\t{content}")
+ return len(deprecated)
def parse_options(args):
parser = argparse.ArgumentParser(
description="Collect deprecation statistics")
- parser.add_argument("v8_header", nargs='?', help="Path to v8.h")
+ parser.add_argument("include_dir", nargs='?', help="Path to includes dir")
parser.add_argument("--before", help="Filter by date")
+ parser.add_argument("--verbose",
+ "-v",
+ help="Verbose logging",
+ action="store_true")
options = parser.parse_args(args)
+ if options.verbose:
+ logging.basicConfig(level=logging.DEBUG)
if options.before:
options.before = datetime.strptime(options.before, '%Y-%m-%d')
else:
options.before = datetime.now()
- if options.v8_header is None:
+ if options.include_dir is None:
base_path = Path(__file__).parent.parent
- options.v8_header = str(
- (base_path / 'include' / 'v8.h').relative_to(base_path))
+ options.include_dir = str((base_path / 'include').relative_to(base_path))
return options
def main(args):
options = parse_options(args)
- blame_list = get_blame(options.v8_header)
- filter_and_print(blame_list, "V8_DEPRECATE_SOON", options)
+ header_files = HeaderFile.get_api_header_files(options)
+ print("V8_DEPRECATE_SOON:")
+ for header in header_files:
+ header.filter_and_print("V8_DEPRECATE_SOON", options)
print("\n")
- filter_and_print(blame_list, "V8_DEPRECATED", options)
+ print("V8_DEPRECATED:")
+ for header in header_files:
+ header.filter_and_print("V8_DEPRECATED", options)
if __name__ == "__main__":
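
list_deprecated.py now scans every header under include/ rather than a single v8.h and collects the per-file git-blame data in parallel through a multiprocessing Pool. A minimal sketch of that fan-out pattern (worker callable and pool size are illustrative):

    from multiprocessing import Pool

    def collect_blame_lists(paths, worker, processes=24):
        # Each worker call runs `git blame` for one header; Pool.map spreads
        # the calls across processes and returns results in input order.
        with Pool(processes=processes) as pool:
            return pool.map(worker, paths)
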
diff --git a/chromium/v8/tools/release/merge_to_branch.py b/chromium/v8/tools/release/merge_to_branch.py
index 44f933e5414..08a36125f8d 100755
--- a/chromium/v8/tools/release/merge_to_branch.py
+++ b/chromium/v8/tools/release/merge_to_branch.py
@@ -77,7 +77,7 @@ class SearchArchitecturePorts(Step):
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="^[Pp]ort %s" % revision,
- branch=self.vc.RemoteMasterBranch())
+ branch=self.vc.RemoteMainBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -198,7 +198,7 @@ class CleanUp(Step):
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "master to release branches like 4.5. This script does not "
+ "main to release branches like 4.5. This script does not "
"version the commit. See http://goo.gl/9ke2Vw for more "
"information.")
diff --git a/chromium/v8/tools/release/mergeinfo.py b/chromium/v8/tools/release/mergeinfo.py
index bed7441f85e..8fae8ad05c0 100755
--- a/chromium/v8/tools/release/mergeinfo.py
+++ b/chromium/v8/tools/release/mergeinfo.py
@@ -30,25 +30,25 @@ def describe_commit(git_working_dir, hash_to_search, one_line=False):
def get_followup_commits(git_working_dir, hash_to_search):
cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY,
- 'remotes/origin/master'];
+ 'remotes/origin/main'];
return git_execute(git_working_dir, cmd).strip().splitlines()
def get_merge_commits(git_working_dir, hash_to_search):
- merges = get_related_commits_not_on_master(git_working_dir, hash_to_search)
- false_merges = get_related_commits_not_on_master(
+ merges = get_related_commits_not_on_main(git_working_dir, hash_to_search)
+ false_merges = get_related_commits_not_on_main(
git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
false_merges = set(false_merges)
return ([merge_commit for merge_commit in merges
if merge_commit not in false_merges])
-def get_related_commits_not_on_master(git_working_dir, grep_command):
+def get_related_commits_not_on_main(git_working_dir, grep_command):
commits = git_execute(git_working_dir, ['log',
'--all',
'--grep=' + grep_command,
GIT_OPTION_ONELINE,
'--decorate',
'--not',
- 'remotes/origin/master',
+ 'remotes/origin/main',
GIT_OPTION_HASH_ONLY])
return commits.splitlines()
diff --git a/chromium/v8/tools/release/roll_merge.py b/chromium/v8/tools/release/roll_merge.py
index 636c8829807..d25f95e3971 100755
--- a/chromium/v8/tools/release/roll_merge.py
+++ b/chromium/v8/tools/release/roll_merge.py
@@ -78,7 +78,7 @@ class SearchArchitecturePorts(Step):
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="Port %s" % revision,
- branch=self.vc.RemoteMasterBranch())
+ branch=self.vc.RemoteMainBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -226,7 +226,7 @@ class CleanUp(Step):
class RollMerge(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "master to other branches, including candidates and roll branches.")
+ "main to other branches, including candidates and roll branches.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
diff --git a/chromium/v8/tools/release/search_related_commits.py b/chromium/v8/tools/release/search_related_commits.py
index e6e52d21961..48e6ae25922 100755
--- a/chromium/v8/tools/release/search_related_commits.py
+++ b/chromium/v8/tools/release/search_related_commits.py
@@ -200,7 +200,7 @@ if __name__ == "__main__": # pragma: no cover
"This tool analyzes the commit range between <of> and <until>. "
"It finds commits which belong together e.g. Implement/Revert pairs and "
"Implement/Port/Revert triples. All supplied hashes need to be "
- "from the same branch e.g. master.")
+ "from the same branch e.g. main.")
parser.add_argument("-g", "--git-dir", required=False, default=".",
help="The path to your git working directory.")
parser.add_argument("--verbose", action="store_true",
diff --git a/chromium/v8/tools/release/test_mergeinfo.py b/chromium/v8/tools/release/test_mergeinfo.py
index f8619bb2fdb..9404542ef67 100755
--- a/chromium/v8/tools/release/test_mergeinfo.py
+++ b/chromium/v8/tools/release/test_mergeinfo.py
@@ -31,7 +31,7 @@ class TestMergeInfo(unittest.TestCase):
return output
def _update_origin(self):
- # Fetch from origin to get/update the origin/master branch
+ # Fetch from origin to get/update the origin/main branch
self._execute_git(['fetch', 'origin'])
def setUp(self):
@@ -54,10 +54,10 @@ class TestMergeInfo(unittest.TestCase):
def _assert_correct_standard_result(
self, result, all_commits, hash_of_first_commit):
- self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertEqual(len(result), 1, "Main commit not found")
self.assertTrue(
result.get(hash_of_first_commit),
- "Master commit is wrong")
+ "Main commit is wrong")
self.assertEqual(
len(result[hash_of_first_commit]),
@@ -124,7 +124,7 @@ class TestMergeInfo(unittest.TestCase):
def testSearchMerges(self):
self._execute_git(['branch', 'test'])
- self._execute_git(['checkout', 'master'])
+ self._execute_git(['checkout', 'main'])
message = 'real initial commit'
self._make_empty_commit(message)
commits = self._get_commits()
@@ -142,7 +142,7 @@ class TestMergeInfo(unittest.TestCase):
message = 'Cr-Branched-From: ' + hash_of_first_commit
hash_of_ignored = self._make_empty_commit(message)
- self._execute_git(['checkout', 'master'])
+ self._execute_git(['checkout', 'main'])
followups = mergeinfo.get_followup_commits(
self.base_dir,
diff --git a/chromium/v8/tools/release/test_scripts.py b/chromium/v8/tools/release/test_scripts.py
index e8664cb2f10..e8757cf2771 100755
--- a/chromium/v8/tools/release/test_scripts.py
+++ b/chromium/v8/tools/release/test_scripts.py
@@ -300,7 +300,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDefault(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -312,7 +312,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareNoConfirm(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("n"),
@@ -323,7 +323,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDeleteBranchFailure(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -395,13 +395,13 @@ class ScriptTest(unittest.TestCase):
test_tag
"""
- # Version as tag: 3.22.4.0. Version on master: 3.22.6.
+ # Version as tag: 3.22.4.0. Version on main: 3.22.6.
# Make sure that the latest version is 3.22.6.0.
def testIncrementVersion(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- include/v8-version.h",
+ Cmd("git checkout -f origin/main -- include/v8-version.h",
"", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
@@ -430,7 +430,7 @@ test_tag
def testCreateRelease(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- # The version file on master has build level 5.
+ # The version file on main has build level 5.
self.WriteFakeVersionFile(build=5)
commit_msg = """Version 3.22.5"""
@@ -449,18 +449,18 @@ test_tag
expectations = [
Cmd("git fetch origin +refs/heads/*:refs/heads/*", ""),
- Cmd("git checkout -f origin/master", "", cb=self.WriteFakeWatchlistsFile),
+ Cmd("git checkout -f origin/main", "", cb=self.WriteFakeWatchlistsFile),
Cmd("git branch", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- include/v8-version.h",
+ Cmd("git checkout -f origin/main -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git push origin push_hash:refs/heads/3.22.5", ""),
- Cmd("git reset --hard origin/master", ""),
+ Cmd("git reset --hard origin/main", ""),
Cmd("git new-branch work-branch --upstream origin/3.22.5", ""),
Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
@@ -475,8 +475,8 @@ test_tag
"\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin refs/tags/3.22.5:refs/tags/3.22.5", ""),
- Cmd("git checkout -f origin/master", ""),
- Cmd("git branch", "* master\n work-branch\n"),
+ Cmd("git checkout -f origin/main", ""),
+ Cmd("git branch", "* main\n work-branch\n"),
Cmd("git branch -D work-branch", ""),
Cmd("git gc", ""),
]
@@ -488,7 +488,7 @@ test_tag
CreateRelease(TEST_CONFIG, self).Run(args)
# Note: The version file is on build number 5 again in the end of this test
- # since the git command that merges to master is mocked out.
+ # since the git command that merges to main is mocked out.
# Check for correct content of the WATCHLISTS file
@@ -718,21 +718,21 @@ BUG=123,234,345,456,567,v8:123
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"Port ab12345\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab45678\nab23456"),
Cmd("git log -1 --format=%s ab45678", "Title1"),
Cmd("git log -1 --format=%s ab23456", "Title2"),
Cmd(("git log --format=%H --grep=\"Port ab23456\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
""),
Cmd(("git log --format=%H --grep=\"Port ab34567\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab56789"),
Cmd("git log -1 --format=%s ab56789", "Title3"),
RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)?
@@ -792,7 +792,7 @@ BUG=123,234,345,456,567,v8:123
"hsh_to_tag"),
Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
Cmd("git push origin refs/tags/3.22.5.1:refs/tags/3.22.5.1", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
@@ -855,21 +855,21 @@ NOTREECHECKS=true
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab45678\nab23456"),
Cmd("git log -1 --format=%s ab45678", "Title1"),
Cmd("git log -1 --format=%s ab23456", "Title2"),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
""),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab56789"),
Cmd("git log -1 --format=%s ab56789", "Title3"),
RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)?
@@ -916,7 +916,7 @@ NOTREECHECKS=true
Cmd("git cl presubmit", "Presubmit successfull\n"),
Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
cb=VerifyLand),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
diff --git a/chromium/v8/tools/release/test_search_related_commits.py b/chromium/v8/tools/release/test_search_related_commits.py
index cf6123611f0..6943915fd63 100755
--- a/chromium/v8/tools/release/test_search_related_commits.py
+++ b/chromium/v8/tools/release/test_search_related_commits.py
@@ -43,7 +43,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28059}"""
+ Cr-Commit-Position: refs/heads/main@{#28059}"""
self._make_empty_commit(message)
message = """[crankshaft] Do some stuff
@@ -52,7 +52,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243007
- Cr-Commit-Position: refs/heads/master@{#28030}"""
+ Cr-Commit-Position: refs/heads/main@{#28030}"""
self._make_empty_commit(message)
@@ -62,10 +62,10 @@ class TestSearchRelatedCommits(unittest.TestCase):
def _assert_correct_standard_result(
self, result, all_commits, hash_of_first_commit):
- self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertEqual(len(result), 1, "Main commit not found")
self.assertTrue(
result.get(hash_of_first_commit),
- "Master commit is wrong")
+ "Main commit is wrong")
self.assertEqual(
len(result[hash_of_first_commit]),
@@ -86,12 +86,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testSearchByCommitPosition(self):
message = """Revert of some stuff.
- > Cr-Commit-Position: refs/heads/master@{#28059}
+ > Cr-Commit-Position: refs/heads/main@{#28059}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -106,12 +106,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testSearchByTitle(self):
message = """Revert of some stuff.
> [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/master@{#289}
+ > Cr-Commit-Position: refs/heads/main@{#289}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -134,7 +134,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -162,16 +162,16 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
# Related commits happen before and after separator so it is a hit
- commit_pos_of_master = "27088"
- message = """Implement awesome feature: Master commit
+ commit_pos_of_main = "27088"
+ message = """Implement awesome feature: Main commit
Review URL: https://codereview.chromium.org/1084243235
- Cr-Commit-Position: refs/heads/master@{#""" + commit_pos_of_master + "}"
+ Cr-Commit-Position: refs/heads/main@{#""" + commit_pos_of_main + "}"
self._make_empty_commit(message)
# Separator commit
@@ -179,7 +179,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243456
- Cr-Commit-Position: refs/heads/master@{#28173}"""
+ Cr-Commit-Position: refs/heads/main@{#28173}"""
self._make_empty_commit(message)
# Filler commit
@@ -187,11 +187,11 @@ class TestSearchRelatedCommits(unittest.TestCase):
self._make_empty_commit(message)
# Related commit after separator: a hit
- message = "Patch r" + commit_pos_of_master +""" done
+ message = "Patch r" + commit_pos_of_main +""" done
Review URL: https://codereview.chromium.org/1084243235
- Cr-Commit-Position: refs/heads/master@{#29567}"""
+ Cr-Commit-Position: refs/heads/main@{#29567}"""
self._make_empty_commit(message)
#Fetch again for an update
@@ -221,12 +221,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testPrettyPrint(self):
message = """Revert of some stuff.
> [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/master@{#289}
+ > Cr-Commit-Position: refs/heads/main@{#289}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -248,7 +248,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
output.append(current_line)
self.assertIs(len(output), 2, "Not exactly two entries written")
- self.assertTrue(output[0].startswith("+"), "Master entry not marked with +")
+ self.assertTrue(output[0].startswith("+"), "Main entry not marked with +")
self.assertTrue(output[1].startswith("| "), "Child entry not marked with |")
def testNothingFound(self):
diff --git a/chromium/v8/tools/run_perf.py b/chromium/v8/tools/run_perf.py
index cdbbed8176f..1e22b298a80 100644
--- a/chromium/v8/tools/run_perf.py
+++ b/chromium/v8/tools/run_perf.py
@@ -126,6 +126,7 @@ from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
+# for py2/py3 compatibility
try:
basestring # Python 2
except NameError: # Python 3
@@ -152,7 +153,7 @@ def GeometricMean(values):
The mean is calculated using log to avoid overflow.
"""
- values = map(float, values)
+ values = list(map(float, values))
return math.exp(sum(map(math.log, values)) / len(values))
@@ -224,9 +225,9 @@ class ResultTracker(object):
def ToDict(self):
return {
- 'traces': self.traces.values(),
+ 'traces': list(self.traces.values()),
'errors': self.errors,
- 'runnables': self.runnables.values(),
+ 'runnables': list(self.runnables.values()),
}
def WriteToFile(self, file_name):
@@ -458,7 +459,9 @@ class RunnableConfig(GraphConfig):
"""
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
- os.chdir(os.path.join(suite_dir, bench_dir))
+ cwd = os.path.join(suite_dir, bench_dir)
+ logging.debug('Changing CWD to: %s' % cwd)
+ os.chdir(cwd)
def GetCommandFlags(self, extra_flags=None):
suffix = ['--'] + self.test_flags if self.test_flags else []
@@ -596,9 +599,11 @@ def find_build_directory(base_path, arch):
'Release',
]
possible_paths = [os.path.join(base_path, p) for p in possible_paths]
- actual_paths = filter(is_build, possible_paths)
+ actual_paths = list(filter(is_build, possible_paths))
assert actual_paths, 'No build directory found.'
- assert len(actual_paths) == 1, 'Found ambiguous build directories.'
+ assert len(
+ actual_paths
+ ) == 1, 'Found ambiguous build directories use --binary-override-path.'
return actual_paths[0]
@@ -677,10 +682,10 @@ class DesktopPlatform(Platform):
if args.prioritize:
self.command_prefix += ['-n', '-20']
if args.affinitize != None:
- # schedtool expects a bit pattern when setting affinity, where each
- # bit set to '1' corresponds to a core where the process may run on.
- # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
- # a core number, we need to map to said bit pattern.
+ # schedtool expects a bit pattern when setting affinity, where each
+ # bit set to '1' corresponds to a core where the process may run on.
+ # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
+ # a core number, we need to map to said bit pattern.
cpu = int(args.affinitize)
core = 1 << cpu
self.command_prefix += ['-a', ('0x%x' % core)]
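
Note: the schedtool comment in the hunk above describes how a core index is turned into the affinity bit mask schedtool expects. A tiny standalone sketch of that mapping (illustrative only, not part of the patch):

    # schedtool takes a bit mask: bit N set means the process may run on CPU N.
    def core_mask(cpu_index):
        return 1 << cpu_index

    assert '0x%x' % core_mask(0) == '0x1'  # CPU 0 -> lowest bit
    assert '0x%x' % core_mask(3) == '0x8'  # CPU 3 -> fourth bit
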
@@ -699,6 +704,7 @@ class DesktopPlatform(Platform):
def _Run(self, runnable, count, secondary=False):
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
+ logging.debug('Running command: %s' % cmd)
output = cmd.execute()
if output.IsSuccess() and '--prof' in self.extra_flags:
@@ -841,10 +847,10 @@ class CustomMachineConfiguration:
try:
with open('/sys/devices/system/cpu/present', 'r') as f:
indexes = f.readline()
- r = map(int, indexes.split('-'))
+ r = list(map(int, indexes.split('-')))
if len(r) == 1:
- return range(r[0], r[0] + 1)
- return range(r[0], r[1] + 1)
+ return list(range(r[0], r[0] + 1))
+ return list(range(r[0], r[1] + 1))
except Exception:
logging.exception('Failed to retrieve number of CPUs.')
raise
@@ -1034,7 +1040,7 @@ def Main(argv):
# Ensure all arguments have absolute path before we start changing current
# directory.
- args.suite = map(os.path.abspath, args.suite)
+ args.suite = list(map(os.path.abspath, args.suite))
prev_aslr = None
prev_cpu_gov = None
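
Note: the list(...) wrappers added throughout run_perf.py above all address the same Python 3 change: map() and filter() now return lazy iterators, so anything that calls len() on the result or indexes it needs an explicit list. A minimal standalone sketch mirroring the GeometricMean hunk (illustrative, not part of the patch):

    import math

    def geometric_mean(values):
        # map() is lazy in Python 3; materialize it so len() below keeps working.
        values = list(map(float, values))
        return math.exp(sum(map(math.log, values)) / len(values))

    assert round(geometric_mean([2, 8]), 6) == 4.0
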
diff --git a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
index 4faa8fc3aa8..1ef8347088b 100644
--- a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
+++ b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
@@ -170,6 +170,9 @@ export class TimelineTrackBase extends V8CustomElement {
}
_updateDimensions() {
+ // No data in this timeline, no need to resize
+ if (!this._timeline) return;
+
const centerOffset = this._timelineBoundingClientRect.width / 2;
const time =
this.relativePositionToTime(this._timelineScrollLeft + centerOffset);
diff --git a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
index 24b389b9596..5d60a5fa09a 100644
--- a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
+++ b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
@@ -24,7 +24,7 @@ export class TimelineTrackStackedBase extends TimelineTrackBase {
set data(timeline) {
super.data = timeline;
this._contentWidth = 0;
- this._prepareDrawableItems();
+ if (timeline.values.length > 0) this._prepareDrawableItems();
}
_handleDoubleClick(event) {
diff --git a/chromium/v8/tools/testrunner/base_runner.py b/chromium/v8/tools/testrunner/base_runner.py
index 009893a23a2..d86dea1cb45 100644
--- a/chromium/v8/tools/testrunner/base_runner.py
+++ b/chromium/v8/tools/testrunner/base_runner.py
@@ -113,7 +113,8 @@ SLOW_ARCHS = [
"mips64el",
"s390",
"s390x",
- "riscv64"
+ "riscv64",
+ "loong64"
]
@@ -191,6 +192,7 @@ class BuildConfig(object):
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
self.pointer_compression_shared_cage = build_config['v8_enable_pointer_compression_shared_cage']
+ self.virtual_memory_cage = build_config['v8_enable_virtual_memory_cage']
self.third_party_heap = build_config['v8_enable_third_party_heap']
self.webassembly = build_config['v8_enable_webassembly']
# Export only for MIPS target
@@ -234,6 +236,8 @@ class BuildConfig(object):
detected_options.append('pointer_compression')
if self.pointer_compression_shared_cage:
detected_options.append('pointer_compression_shared_cage')
+ if self.virtual_memory_cage:
+ detected_options.append('virtual_memory_cage')
if self.third_party_heap:
detected_options.append('third_party_heap')
if self.webassembly:
@@ -267,6 +271,7 @@ class BaseTestRunner(object):
self.build_config = None
self.mode_options = None
self.target_os = None
+ self.infra_staging = False
@property
def framework_name(self):
@@ -279,6 +284,7 @@ class BaseTestRunner(object):
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
+ self.infra_staging = options.infra_staging
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
@@ -348,6 +354,13 @@ class BaseTestRunner(object):
help="How long should fuzzer run")
parser.add_option("--swarming", default=False, action="store_true",
help="Indicates running test driver on swarming.")
+ parser.add_option('--infra-staging', help='Use new test runner features',
+ dest='infra_staging', default=None,
+ action='store_true')
+ parser.add_option('--no-infra-staging',
+ help='Opt out of new test runner features',
+ dest='infra_staging', default=None,
+ action='store_false')
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
@@ -363,7 +376,7 @@ class BaseTestRunner(object):
# Progress
parser.add_option("-p", "--progress",
- choices=list(PROGRESS_INDICATORS), default="mono",
+ choices=list(PROGRESS_INDICATORS.keys()), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
@@ -517,7 +530,7 @@ class BaseTestRunner(object):
options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+ options.extra_flags = sum(list(map(shlex.split, options.extra_flags)), [])
def _process_options(self, options):
pass
@@ -604,7 +617,7 @@ class BaseTestRunner(object):
def expand_test_group(name):
return TEST_MAP.get(name, [name])
- return reduce(list.__add__, map(expand_test_group, args), [])
+ return reduce(list.__add__, list(map(expand_test_group, args)), [])
def _args_to_suite_names(self, args, test_root):
# Use default tests if no test configuration was provided at the cmd line.
@@ -663,6 +676,9 @@ class BaseTestRunner(object):
self.build_config.arch == 'mipsel':
no_simd_hardware = not simd_mips
+ if self.build_config.arch == 'loong64':
+ no_simd_hardware = True
+
# S390 hosts without VEF1 do not support Simd.
if self.build_config.arch == 's390x' and \
not self.build_config.simulator_run and \
@@ -675,6 +691,10 @@ class BaseTestRunner(object):
utils.GuessPowerProcessorVersion() < 9:
no_simd_hardware = True
+ # riscv64 does not support Simd instructions
+ if self.build_config.arch == 'riscv64':
+ no_simd_hardware = True
+
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
@@ -713,6 +733,7 @@ class BaseTestRunner(object):
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
"pointer_compression_shared_cage": self.build_config.pointer_compression_shared_cage,
+ "virtual_memory_cage": self.build_config.virtual_memory_cage,
}
def _runner_flags(self):
@@ -759,7 +780,7 @@ class BaseTestRunner(object):
raise NotImplementedError()
def _prepare_procs(self, procs):
- procs = filter(None, procs)
+ procs = list([_f for _f in procs if _f])
for i in range(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
diff --git a/chromium/v8/tools/testrunner/local/android.py b/chromium/v8/tools/testrunner/local/android.py
index ebf04afad61..cfc4e537f57 100644
--- a/chromium/v8/tools/testrunner/local/android.py
+++ b/chromium/v8/tools/testrunner/local/android.py
@@ -128,12 +128,6 @@ class _Driver(object):
)
self.push_file(
shell_dir,
- 'snapshot_blob_trusted.bin',
- target_dir,
- skip_if_missing=True,
- )
- self.push_file(
- shell_dir,
'icudtl.dat',
target_dir,
skip_if_missing=True,
diff --git a/chromium/v8/tools/testrunner/local/statusfile.py b/chromium/v8/tools/testrunner/local/statusfile.py
index 48b92869599..174bd27a5fe 100644
--- a/chromium/v8/tools/testrunner/local/statusfile.py
+++ b/chromium/v8/tools/testrunner/local/statusfile.py
@@ -32,8 +32,8 @@ from __future__ import absolute_import
import os
import re
-from .variants import ALL_VARIANTS
-from .utils import Freeze
+from testrunner.local.variants import ALL_VARIANTS
+from testrunner.local.utils import Freeze
# Possible outcomes
FAIL = "FAIL"
@@ -64,7 +64,7 @@ VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little", "android",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
- "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64"]:
+ "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64", "loong64"]:
VARIABLES[var] = var
# Allow using variants as keywords.
@@ -131,8 +131,8 @@ class StatusFile(object):
for variant in variants:
for rule, value in (
- list(self._rules.get(variant, {}).iteritems()) +
- list(self._prefix_rules.get(variant, {}).iteritems())):
+ list(self._rules.get(variant, {}).items()) +
+ list(self._prefix_rules.get(variant, {}).items())):
if (rule, variant) not in used_rules:
if variant == '':
variant_desc = 'variant independent'
@@ -161,7 +161,7 @@ def _EvalExpression(exp, variables):
try:
return eval(exp, variables)
except NameError as e:
- identifier = re.match("name '(.*)' is not defined", e.message).group(1)
+ identifier = re.match("name '(.*)' is not defined", str(e)).group(1)
assert identifier == "variant", "Unknown identifier: %s" % identifier
return VARIANT_EXPRESSION
@@ -283,7 +283,7 @@ def ReadStatusFile(content, variables):
def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
- for rule, outcome_list in section.items():
+ for rule, outcome_list in list(section.items()):
assert type(rule) == str
if rule[-1] == '*':
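
Note: the statusfile.py hunks above replace two Python-2-only spellings with portable ones: dict.iteritems() (removed in Python 3) becomes items(), and the removed NameError.message attribute becomes str(e). A standalone sketch of both, using arbitrary example data (illustrative, not part of the patch):

    import re

    d = {'always': True}
    # items() exists in both Python 2 and 3; iteritems() is gone in 3.
    assert list(d.items()) == [('always', True)]

    try:
        eval("variant == 'default'", {})
    except NameError as e:
        # str(e) is the portable replacement for the removed e.message.
        assert re.match("name '(.*)' is not defined", str(e)).group(1) == 'variant'
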
diff --git a/chromium/v8/tools/testrunner/local/testsuite.py b/chromium/v8/tools/testrunner/local/testsuite.py
index a72ef4be610..864d7346fca 100644
--- a/chromium/v8/tools/testrunner/local/testsuite.py
+++ b/chromium/v8/tools/testrunner/local/testsuite.py
@@ -223,7 +223,7 @@ class TestGenerator(object):
return self
def __next__(self):
- return next(self)
+ return self.next()
def next(self):
return next(self._iterator)
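
Note: besides the import change, the TestGenerator hunk above fixes a latent bug: __next__ previously called next(self), which re-enters __next__ and recurses forever; delegating to the Python-2-style next() method terminates. A minimal sketch of that py2/py3 iterator shim (illustrative, not part of the patch):

    class Wrapper(object):
        def __init__(self, items):
            self._iterator = iter(items)

        def __iter__(self):
            return self

        def __next__(self):   # Python 3 protocol entry point
            return self.next()

        def next(self):       # Python 2 protocol entry point
            return next(self._iterator)

    assert list(Wrapper([1, 2, 3])) == [1, 2, 3]
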
diff --git a/chromium/v8/tools/testrunner/local/utils.py b/chromium/v8/tools/testrunner/local/utils.py
index 05d1ef7d5e9..896f0731663 100644
--- a/chromium/v8/tools/testrunner/local/utils.py
+++ b/chromium/v8/tools/testrunner/local/utils.py
@@ -212,7 +212,7 @@ class FrozenDict(dict):
def Freeze(obj):
if isinstance(obj, dict):
- return FrozenDict((k, Freeze(v)) for k, v in obj.iteritems())
+ return FrozenDict((k, Freeze(v)) for k, v in list(obj.items()))
elif isinstance(obj, set):
return frozenset(obj)
elif isinstance(obj, list):
diff --git a/chromium/v8/tools/testrunner/local/variants.py b/chromium/v8/tools/testrunner/local/variants.py
index ba4eff451ad..42bf12d4647 100644
--- a/chromium/v8/tools/testrunner/local/variants.py
+++ b/chromium/v8/tools/testrunner/local/variants.py
@@ -13,11 +13,12 @@ ALL_VARIANT_FLAGS = {
"infra_staging": [[]],
"interpreted_regexp": [["--regexp-interpret-all"]],
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
- "concurrent_inlining": [["--concurrent-inlining"]],
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
"always_sparkplug": [[ "--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
+ "no_concurrent_inlining": [["--no-concurrent-inlining",
+ "--no-stress-concurrent-inlining"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
@@ -38,7 +39,6 @@ ALL_VARIANT_FLAGS = {
"stress_snapshot": [["--stress-snapshot"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
- "trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
"turboprop": [["--turboprop"]],
"turboprop_as_toptier": [["--turboprop-as-toptier", "--turboprop"]],
@@ -58,13 +58,14 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"nooptimization": ["--always-opt"],
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "stress_concurrent_inlining": ["--single-threaded", "--predictable", "--turboprop"],
+ "stress_concurrent_inlining": ["--single-threaded", "--predictable",
+ "--turboprop", "--lazy-feedback-allocation"],
"turboprop": ["--stress_concurrent_inlining"],
# The fast API tests initialize an embedder object that never needs to be
# serialized to the snapshot, so we don't have a
# SerializeInternalFieldsCallback for it, so they are incompatible with
# stress_snapshot.
- "stress_snapshot": ["--turbo-fast-api-calls"],
+ "stress_snapshot": ["--expose-fast-api"],
"stress": ["--always-opt", "--no-always-opt",
"--max-inlined-bytecode-size=*",
"--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
diff --git a/chromium/v8/tools/testrunner/num_fuzzer.py b/chromium/v8/tools/testrunner/num_fuzzer.py
index d5b243ba96d..ffd2407d92d 100755
--- a/chromium/v8/tools/testrunner/num_fuzzer.py
+++ b/chromium/v8/tools/testrunner/num_fuzzer.py
@@ -20,7 +20,7 @@ from testrunner.testproc import fuzzer
from testrunner.testproc.base import TestProcProducer
from testrunner.testproc.combiner import CombinerProc
from testrunner.testproc.execution import ExecutionProc
-from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.expectation import ExpectationProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker
@@ -63,6 +63,11 @@ class NumFuzzer(base_runner.BaseTestRunner):
help="probability [0-10] of adding --random-gc-interval "
"flag to the test")
+ # Stress stack size
+ parser.add_option("--stress-stack-size", default=0, type="int",
+ help="probability [0-10] of adding --stack-size "
+ "flag to the test")
+
# Stress tasks
parser.add_option("--stress-delay-tasks", default=0, type="int",
help="probability [0-10] of adding --stress-delay-tasks "
@@ -119,7 +124,11 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _runner_flags(self):
"""Extra default flags specific to the test runner implementation."""
- return ['--no-abort-on-contradictory-flags']
+ return [
+ '--no-abort-on-contradictory-flags',
+ '--testing-d8-test-runner',
+ '--no-fail'
+ ]
def _get_statusfile_variables(self, options):
variables = (
@@ -133,6 +142,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.stress_compaction,
options.stress_gc,
options.stress_delay_tasks,
+ options.stress_stack_size,
options.stress_thread_pool_size])),
})
return variables
@@ -154,7 +164,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
# TODO(majeski): Improve sharding when combiner is present. Maybe select
# different random seeds for shards instead of splitting tests.
self._create_shard_proc(options),
- ForgiveTimeoutProc(),
+ ExpectationProc(),
combiner,
self._create_fuzzer(fuzzer_rng, options),
sigproc,
@@ -221,6 +231,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
add('marking', options.stress_marking)
add('scavenge', options.stress_scavenge)
add('gc_interval', options.stress_gc)
+ add('stack', options.stress_stack_size)
add('threads', options.stress_thread_pool_size)
add('delay', options.stress_delay_tasks)
add('deopt', options.stress_deopt, options.stress_deopt_min)
diff --git a/chromium/v8/tools/testrunner/objects/testcase.py b/chromium/v8/tools/testrunner/objects/testcase.py
index a1f1754b22a..19fbdd6c112 100644
--- a/chromium/v8/tools/testrunner/objects/testcase.py
+++ b/chromium/v8/tools/testrunner/objects/testcase.py
@@ -30,14 +30,14 @@ import os
import re
import shlex
-from ..outproc import base as outproc
-from ..local import command
-from ..local import statusfile
-from ..local import utils
-from ..local.variants import ALL_VARIANT_FLAGS
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
+from testrunner.outproc import base as outproc
+from testrunner.local import command
+from testrunner.local import statusfile
+from testrunner.local import utils
+from testrunner.local.variants import ALL_VARIANT_FLAGS
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
@@ -75,6 +75,13 @@ except NameError:
def cmp(x, y): # Python 3
return (x > y) - (x < y)
+def read_file_utf8(file):
+ try: # Python 3
+ with open(file, encoding='utf-8') as f:
+ return f.read()
+ except TypeError: # Python 2
+ with open(file) as f:
+ return f.read()
class TestCase(object):
def __init__(self, suite, path, name, test_config):
@@ -130,8 +137,8 @@ class TestCase(object):
return not is_flag(outcome)
outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
- self._statusfile_outcomes = filter(not_flag, outcomes)
- self._statusfile_flags = filter(is_flag, outcomes)
+ self._statusfile_outcomes = list(filter(not_flag, outcomes))
+ self._statusfile_flags = list(filter(is_flag, outcomes))
self._expected_outcomes = (
self._parse_status_file_outcomes(self._statusfile_outcomes))
@@ -166,6 +173,15 @@ class TestCase(object):
self._expected_outcomes = (
self.expected_outcomes + [statusfile.TIMEOUT])
+ def allow_pass(self):
+ if self.expected_outcomes == outproc.OUTCOMES_TIMEOUT:
+ self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
+ self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_PASS
+ elif statusfile.PASS not in self.expected_outcomes:
+ self._expected_outcomes = (
+ self.expected_outcomes + [statusfile.PASS])
+
@property
def expected_outcomes(self):
def is_flag(maybe_flag):
@@ -398,8 +414,7 @@ class TestCase(object):
return self._get_source_path() is not None
def get_source(self):
- with open(self._get_source_path()) as f:
- return f.read()
+ return read_file_utf8(self._get_source_path())
def _get_source_path(self):
return None
@@ -445,8 +460,7 @@ class D8TestCase(TestCase):
"""Returns for a given file a list of absolute paths of files needed by the
given file.
"""
- with open(file) as f:
- source = f.read()
+ source = read_file_utf8(file)
result = []
def add_path(path):
result.append(os.path.abspath(path.replace('/', os.path.sep)))
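
Note: read_file_utf8 introduced above relies on Python 2's open() rejecting the encoding keyword with a TypeError, falling back to the plain form there, while Python 3 decodes the source as UTF-8 explicitly; get_source() and the d8 file scanner now share it so non-ASCII test sources decode consistently. A standalone usage sketch, assuming Python 3 and an arbitrary temporary file (illustrative, not part of the patch):

    import os, tempfile

    def read_file_utf8(file):
        try:  # Python 3: decode explicitly as UTF-8
            with open(file, encoding='utf-8') as f:
                return f.read()
        except TypeError:  # Python 2: open() takes no encoding argument
            with open(file) as f:
                return f.read()

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write('// \u2713 non-ASCII source\n'.encode('utf-8'))
    assert '\u2713' in read_file_utf8(path)
    os.remove(path)
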
diff --git a/chromium/v8/tools/testrunner/outproc/base.py b/chromium/v8/tools/testrunner/outproc/base.py
index 9646b96c068..74a1d901590 100644
--- a/chromium/v8/tools/testrunner/outproc/base.py
+++ b/chromium/v8/tools/testrunner/outproc/base.py
@@ -12,8 +12,10 @@ from ..testproc.result import Result
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_TIMEOUT = [statusfile.TIMEOUT]
OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_PASS = [statusfile.FAIL, statusfile.PASS]
class BaseOutProc(object):
diff --git a/chromium/v8/tools/testrunner/standard_runner.py b/chromium/v8/tools/testrunner/standard_runner.py
index 41352b34e8e..08f17e77216 100755
--- a/chromium/v8/tools/testrunner/standard_runner.py
+++ b/chromium/v8/tools/testrunner/standard_runner.py
@@ -16,7 +16,7 @@ import sys
import tempfile
# Adds testrunner to the path hence it has to be imported at the beggining.
-from . import base_runner
+import testrunner.base_runner as base_runner
from testrunner.local import utils
from testrunner.local.variants import ALL_VARIANTS
@@ -132,13 +132,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option('--cfi-vptr',
help='Run tests with UBSAN cfi_vptr option.',
default=False, action='store_true')
- parser.add_option('--infra-staging', help='Use new test runner features',
- dest='infra_staging', default=None,
- action='store_true')
- parser.add_option('--no-infra-staging',
- help='Opt out of new test runner features',
- dest='infra_staging', default=None,
- action='store_false')
parser.add_option('--no-sorting', '--nosorting',
help='Don\'t sort tests according to duration of last'
' run.',
diff --git a/chromium/v8/tools/testrunner/testproc/expectation.py b/chromium/v8/tools/testrunner/testproc/expectation.py
index 285a599a742..3abe40e169b 100644
--- a/chromium/v8/tools/testrunner/testproc/expectation.py
+++ b/chromium/v8/tools/testrunner/testproc/expectation.py
@@ -7,14 +7,15 @@ from . import base
from testrunner.local import statusfile
from testrunner.outproc import base as outproc
-class ForgiveTimeoutProc(base.TestProcProducer):
+class ExpectationProc(base.TestProcProducer):
"""Test processor passing tests and results through and forgiving timeouts."""
def __init__(self):
- super(ForgiveTimeoutProc, self).__init__('no-timeout')
+ super(ExpectationProc, self).__init__('no-timeout')
def _next_test(self, test):
subtest = self._create_subtest(test, 'no_timeout')
subtest.allow_timeouts()
+ subtest.allow_pass()
return self._send_test(subtest)
def _result_for(self, test, subtest, result):
diff --git a/chromium/v8/tools/testrunner/testproc/filter.py b/chromium/v8/tools/testrunner/testproc/filter.py
index 20af0f84074..728af483cbc 100644
--- a/chromium/v8/tools/testrunner/testproc/filter.py
+++ b/chromium/v8/tools/testrunner/testproc/filter.py
@@ -70,7 +70,7 @@ class NameFilterProc(base.TestProcFilter):
else:
self._exact_matches[suitename][path] = True
- for s, globs in self._globs.iteritems():
+ for s, globs in list(self._globs.items()):
if not globs or '*' in globs:
self._globs[s] = ['*']
diff --git a/chromium/v8/tools/testrunner/testproc/fuzzer.py b/chromium/v8/tools/testrunner/testproc/fuzzer.py
index 1237da56b2e..67250b1c740 100644
--- a/chromium/v8/tools/testrunner/testproc/fuzzer.py
+++ b/chromium/v8/tools/testrunner/testproc/fuzzer.py
@@ -44,6 +44,7 @@ EXTRA_FLAGS = [
(0.1, '--regexp-tier-up-ticks=100'),
(0.1, '--stress-background-compile'),
(0.1, '--stress-concurrent-inlining'),
+ (0.1, '--stress-flush-code'),
(0.1, '--stress-lazy-source-positions'),
(0.1, '--stress-wasm-code-gc'),
(0.1, '--turbo-instruction-scheduling'),
@@ -265,6 +266,10 @@ class CompactionFuzzer(Fuzzer):
while True:
yield ['--stress-compaction-random']
+class StackSizeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stack-size=%d' % rng.randint(54, 983)]
class TaskDelayFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
@@ -322,6 +327,7 @@ FUZZERS = {
'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
'marking': (MarkingAnalyzer, MarkingFuzzer),
'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+ 'stack': (None, StackSizeFuzzer),
'threads': (None, ThreadPoolSizeFuzzer),
}
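
Note: the new StackSizeFuzzer above follows the shape of the existing fuzzers: create_flags_generator() yields one extra flag list per run, with the stack size drawn from the supplied rng, and the 'stack' entry in FUZZERS wires it to the --stress-stack-size probability option added in num_fuzzer.py. A standalone sketch of that generator pattern (illustrative, not part of the patch):

    import random

    def stack_size_flags(rng):
        # Mirrors StackSizeFuzzer: an endless stream of random --stack-size flags.
        while True:
            yield ['--stack-size=%d' % rng.randint(54, 983)]

    gen = stack_size_flags(random.Random(42))
    flags = next(gen)
    assert flags[0].startswith('--stack-size=')
    assert 54 <= int(flags[0].split('=')[1]) <= 983
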
diff --git a/chromium/v8/tools/unittests/run_tests_test.py b/chromium/v8/tools/unittests/run_tests_test.py
index d9e998312ed..89acacaaa36 100755
--- a/chromium/v8/tools/unittests/run_tests_test.py
+++ b/chromium/v8/tools/unittests/run_tests_test.py
@@ -350,7 +350,8 @@ class SystemTest(unittest.TestCase):
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False,
- v8_enable_pointer_compression_shared_cage=False)
+ v8_enable_pointer_compression_shared_cage=False,
+ v8_enable_virtual_memory_cage=False)
result = run_tests(
basedir,
'--progress=verbose',
diff --git a/chromium/v8/tools/v8heapconst.py b/chromium/v8/tools/v8heapconst.py
index 097b6a7267e..406d1860f88 100644
--- a/chromium/v8/tools/v8heapconst.py
+++ b/chromium/v8/tools/v8heapconst.py
@@ -55,105 +55,104 @@ INSTANCE_TYPES = {
91: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
92: "ASM_WASM_DATA_TYPE",
93: "ASYNC_GENERATOR_REQUEST_TYPE",
- 94: "BASELINE_DATA_TYPE",
- 95: "BREAK_POINT_TYPE",
- 96: "BREAK_POINT_INFO_TYPE",
- 97: "CACHED_TEMPLATE_OBJECT_TYPE",
- 98: "CALL_HANDLER_INFO_TYPE",
- 99: "CLASS_POSITIONS_TYPE",
- 100: "DEBUG_INFO_TYPE",
- 101: "ENUM_CACHE_TYPE",
- 102: "FEEDBACK_CELL_TYPE",
- 103: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 104: "INTERCEPTOR_INFO_TYPE",
- 105: "INTERPRETER_DATA_TYPE",
- 106: "MODULE_REQUEST_TYPE",
- 107: "PROMISE_CAPABILITY_TYPE",
- 108: "PROMISE_REACTION_TYPE",
- 109: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 110: "PROTOTYPE_INFO_TYPE",
- 111: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
- 112: "SCRIPT_TYPE",
- 113: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 114: "STACK_FRAME_INFO_TYPE",
- 115: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 116: "TUPLE2_TYPE",
- 117: "WASM_EXCEPTION_TAG_TYPE",
- 118: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 119: "FIXED_ARRAY_TYPE",
- 120: "HASH_TABLE_TYPE",
- 121: "EPHEMERON_HASH_TABLE_TYPE",
- 122: "GLOBAL_DICTIONARY_TYPE",
- 123: "NAME_DICTIONARY_TYPE",
- 124: "NUMBER_DICTIONARY_TYPE",
- 125: "ORDERED_HASH_MAP_TYPE",
- 126: "ORDERED_HASH_SET_TYPE",
- 127: "ORDERED_NAME_DICTIONARY_TYPE",
- 128: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 129: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 130: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 131: "SCRIPT_CONTEXT_TABLE_TYPE",
- 132: "BYTE_ARRAY_TYPE",
- 133: "BYTECODE_ARRAY_TYPE",
- 134: "FIXED_DOUBLE_ARRAY_TYPE",
- 135: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 136: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 137: "AWAIT_CONTEXT_TYPE",
- 138: "BLOCK_CONTEXT_TYPE",
- 139: "CATCH_CONTEXT_TYPE",
- 140: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 141: "EVAL_CONTEXT_TYPE",
- 142: "FUNCTION_CONTEXT_TYPE",
- 143: "MODULE_CONTEXT_TYPE",
- 144: "NATIVE_CONTEXT_TYPE",
- 145: "SCRIPT_CONTEXT_TYPE",
- 146: "WITH_CONTEXT_TYPE",
- 147: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 148: "EXPORTED_SUB_CLASS_TYPE",
- 149: "EXPORTED_SUB_CLASS2_TYPE",
- 150: "SMALL_ORDERED_HASH_MAP_TYPE",
- 151: "SMALL_ORDERED_HASH_SET_TYPE",
- 152: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 153: "DESCRIPTOR_ARRAY_TYPE",
- 154: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 155: "SOURCE_TEXT_MODULE_TYPE",
- 156: "SYNTHETIC_MODULE_TYPE",
- 157: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 158: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 159: "WEAK_FIXED_ARRAY_TYPE",
- 160: "TRANSITION_ARRAY_TYPE",
- 161: "CELL_TYPE",
- 162: "CODE_TYPE",
- 163: "CODE_DATA_CONTAINER_TYPE",
- 164: "COVERAGE_INFO_TYPE",
- 165: "EMBEDDER_DATA_ARRAY_TYPE",
- 166: "FEEDBACK_METADATA_TYPE",
- 167: "FEEDBACK_VECTOR_TYPE",
- 168: "FILLER_TYPE",
- 169: "FREE_SPACE_TYPE",
- 170: "INTERNAL_CLASS_TYPE",
- 171: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 172: "MAP_TYPE",
- 173: "MEGA_DOM_HANDLER_TYPE",
- 174: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 175: "PREPARSE_DATA_TYPE",
- 176: "PROPERTY_ARRAY_TYPE",
- 177: "PROPERTY_CELL_TYPE",
- 178: "SCOPE_INFO_TYPE",
- 179: "SHARED_FUNCTION_INFO_TYPE",
- 180: "SMI_BOX_TYPE",
- 181: "SMI_PAIR_TYPE",
- 182: "SORT_STATE_TYPE",
- 183: "SWISS_NAME_DICTIONARY_TYPE",
- 184: "WEAK_ARRAY_LIST_TYPE",
- 185: "WEAK_CELL_TYPE",
- 186: "WASM_ARRAY_TYPE",
- 187: "WASM_STRUCT_TYPE",
- 188: "JS_PROXY_TYPE",
+ 94: "BREAK_POINT_TYPE",
+ 95: "BREAK_POINT_INFO_TYPE",
+ 96: "CACHED_TEMPLATE_OBJECT_TYPE",
+ 97: "CALL_HANDLER_INFO_TYPE",
+ 98: "CLASS_POSITIONS_TYPE",
+ 99: "DEBUG_INFO_TYPE",
+ 100: "ENUM_CACHE_TYPE",
+ 101: "FEEDBACK_CELL_TYPE",
+ 102: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 103: "INTERCEPTOR_INFO_TYPE",
+ 104: "INTERPRETER_DATA_TYPE",
+ 105: "MODULE_REQUEST_TYPE",
+ 106: "PROMISE_CAPABILITY_TYPE",
+ 107: "PROMISE_REACTION_TYPE",
+ 108: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 109: "PROTOTYPE_INFO_TYPE",
+ 110: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 111: "SCRIPT_TYPE",
+ 112: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 113: "STACK_FRAME_INFO_TYPE",
+ 114: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 115: "TUPLE2_TYPE",
+ 116: "WASM_EXCEPTION_TAG_TYPE",
+ 117: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 118: "FIXED_ARRAY_TYPE",
+ 119: "HASH_TABLE_TYPE",
+ 120: "EPHEMERON_HASH_TABLE_TYPE",
+ 121: "GLOBAL_DICTIONARY_TYPE",
+ 122: "NAME_DICTIONARY_TYPE",
+ 123: "NUMBER_DICTIONARY_TYPE",
+ 124: "ORDERED_HASH_MAP_TYPE",
+ 125: "ORDERED_HASH_SET_TYPE",
+ 126: "ORDERED_NAME_DICTIONARY_TYPE",
+ 127: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 129: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 130: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 131: "BYTE_ARRAY_TYPE",
+ 132: "BYTECODE_ARRAY_TYPE",
+ 133: "FIXED_DOUBLE_ARRAY_TYPE",
+ 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 147: "EXPORTED_SUB_CLASS_TYPE",
+ 148: "EXPORTED_SUB_CLASS2_TYPE",
+ 149: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 150: "SMALL_ORDERED_HASH_SET_TYPE",
+ 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 152: "DESCRIPTOR_ARRAY_TYPE",
+ 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 154: "SOURCE_TEXT_MODULE_TYPE",
+ 155: "SYNTHETIC_MODULE_TYPE",
+ 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 158: "WEAK_FIXED_ARRAY_TYPE",
+ 159: "TRANSITION_ARRAY_TYPE",
+ 160: "CELL_TYPE",
+ 161: "CODE_TYPE",
+ 162: "CODE_DATA_CONTAINER_TYPE",
+ 163: "COVERAGE_INFO_TYPE",
+ 164: "EMBEDDER_DATA_ARRAY_TYPE",
+ 165: "FEEDBACK_METADATA_TYPE",
+ 166: "FEEDBACK_VECTOR_TYPE",
+ 167: "FILLER_TYPE",
+ 168: "FREE_SPACE_TYPE",
+ 169: "INTERNAL_CLASS_TYPE",
+ 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 171: "MAP_TYPE",
+ 172: "MEGA_DOM_HANDLER_TYPE",
+ 173: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 174: "PREPARSE_DATA_TYPE",
+ 175: "PROPERTY_ARRAY_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 177: "SCOPE_INFO_TYPE",
+ 178: "SHARED_FUNCTION_INFO_TYPE",
+ 179: "SMI_BOX_TYPE",
+ 180: "SMI_PAIR_TYPE",
+ 181: "SORT_STATE_TYPE",
+ 182: "SWISS_NAME_DICTIONARY_TYPE",
+ 183: "WEAK_ARRAY_LIST_TYPE",
+ 184: "WEAK_CELL_TYPE",
+ 185: "WASM_ARRAY_TYPE",
+ 186: "WASM_STRUCT_TYPE",
+ 187: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 189: "JS_GLOBAL_OBJECT_TYPE",
- 190: "JS_GLOBAL_PROXY_TYPE",
- 191: "JS_MODULE_NAMESPACE_TYPE",
+ 188: "JS_GLOBAL_OBJECT_TYPE",
+ 189: "JS_GLOBAL_PROXY_TYPE",
+ 190: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
@@ -174,143 +173,144 @@ INSTANCE_TYPES = {
2072: "JS_ARRAY_CONSTRUCTOR_TYPE",
2073: "JS_PROMISE_CONSTRUCTOR_TYPE",
2074: "JS_REG_EXP_CONSTRUCTOR_TYPE",
- 2075: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
- 2076: "JS_ITERATOR_PROTOTYPE_TYPE",
- 2077: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
- 2078: "JS_OBJECT_PROTOTYPE_TYPE",
- 2079: "JS_PROMISE_PROTOTYPE_TYPE",
- 2080: "JS_REG_EXP_PROTOTYPE_TYPE",
- 2081: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
- 2082: "JS_SET_PROTOTYPE_TYPE",
- 2083: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
- 2084: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
- 2085: "JS_MAP_KEY_ITERATOR_TYPE",
- 2086: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 2087: "JS_MAP_VALUE_ITERATOR_TYPE",
- 2088: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 2089: "JS_SET_VALUE_ITERATOR_TYPE",
- 2090: "JS_GENERATOR_OBJECT_TYPE",
- 2091: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 2092: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 2093: "JS_DATA_VIEW_TYPE",
- 2094: "JS_TYPED_ARRAY_TYPE",
- 2095: "JS_MAP_TYPE",
- 2096: "JS_SET_TYPE",
- 2097: "JS_WEAK_MAP_TYPE",
- 2098: "JS_WEAK_SET_TYPE",
- 2099: "JS_ARGUMENTS_OBJECT_TYPE",
- 2100: "JS_ARRAY_TYPE",
- 2101: "JS_ARRAY_BUFFER_TYPE",
- 2102: "JS_ARRAY_ITERATOR_TYPE",
- 2103: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 2104: "JS_COLLATOR_TYPE",
- 2105: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 2106: "JS_DATE_TYPE",
- 2107: "JS_DATE_TIME_FORMAT_TYPE",
- 2108: "JS_DISPLAY_NAMES_TYPE",
- 2109: "JS_ERROR_TYPE",
- 2110: "JS_FINALIZATION_REGISTRY_TYPE",
- 2111: "JS_LIST_FORMAT_TYPE",
- 2112: "JS_LOCALE_TYPE",
- 2113: "JS_MESSAGE_OBJECT_TYPE",
- 2114: "JS_NUMBER_FORMAT_TYPE",
- 2115: "JS_PLURAL_RULES_TYPE",
- 2116: "JS_PROMISE_TYPE",
- 2117: "JS_REG_EXP_TYPE",
- 2118: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 2119: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 2120: "JS_SEGMENT_ITERATOR_TYPE",
- 2121: "JS_SEGMENTER_TYPE",
- 2122: "JS_SEGMENTS_TYPE",
- 2123: "JS_STRING_ITERATOR_TYPE",
- 2124: "JS_V8_BREAK_ITERATOR_TYPE",
- 2125: "JS_WEAK_REF_TYPE",
- 2126: "WASM_GLOBAL_OBJECT_TYPE",
- 2127: "WASM_INSTANCE_OBJECT_TYPE",
- 2128: "WASM_MEMORY_OBJECT_TYPE",
- 2129: "WASM_MODULE_OBJECT_TYPE",
- 2130: "WASM_TABLE_OBJECT_TYPE",
- 2131: "WASM_TAG_OBJECT_TYPE",
- 2132: "WASM_VALUE_OBJECT_TYPE",
+ 2075: "JS_CLASS_CONSTRUCTOR_TYPE",
+ 2076: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
+ 2077: "JS_ITERATOR_PROTOTYPE_TYPE",
+ 2078: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
+ 2079: "JS_OBJECT_PROTOTYPE_TYPE",
+ 2080: "JS_PROMISE_PROTOTYPE_TYPE",
+ 2081: "JS_REG_EXP_PROTOTYPE_TYPE",
+ 2082: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
+ 2083: "JS_SET_PROTOTYPE_TYPE",
+ 2084: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
+ 2085: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
+ 2086: "JS_MAP_KEY_ITERATOR_TYPE",
+ 2087: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 2088: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 2089: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 2090: "JS_SET_VALUE_ITERATOR_TYPE",
+ 2091: "JS_GENERATOR_OBJECT_TYPE",
+ 2092: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 2093: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 2094: "JS_DATA_VIEW_TYPE",
+ 2095: "JS_TYPED_ARRAY_TYPE",
+ 2096: "JS_MAP_TYPE",
+ 2097: "JS_SET_TYPE",
+ 2098: "JS_WEAK_MAP_TYPE",
+ 2099: "JS_WEAK_SET_TYPE",
+ 2100: "JS_ARGUMENTS_OBJECT_TYPE",
+ 2101: "JS_ARRAY_TYPE",
+ 2102: "JS_ARRAY_BUFFER_TYPE",
+ 2103: "JS_ARRAY_ITERATOR_TYPE",
+ 2104: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 2105: "JS_COLLATOR_TYPE",
+ 2106: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 2107: "JS_DATE_TYPE",
+ 2108: "JS_DATE_TIME_FORMAT_TYPE",
+ 2109: "JS_DISPLAY_NAMES_TYPE",
+ 2110: "JS_ERROR_TYPE",
+ 2111: "JS_FINALIZATION_REGISTRY_TYPE",
+ 2112: "JS_LIST_FORMAT_TYPE",
+ 2113: "JS_LOCALE_TYPE",
+ 2114: "JS_MESSAGE_OBJECT_TYPE",
+ 2115: "JS_NUMBER_FORMAT_TYPE",
+ 2116: "JS_PLURAL_RULES_TYPE",
+ 2117: "JS_PROMISE_TYPE",
+ 2118: "JS_REG_EXP_TYPE",
+ 2119: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 2120: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 2121: "JS_SEGMENT_ITERATOR_TYPE",
+ 2122: "JS_SEGMENTER_TYPE",
+ 2123: "JS_SEGMENTS_TYPE",
+ 2124: "JS_STRING_ITERATOR_TYPE",
+ 2125: "JS_V8_BREAK_ITERATOR_TYPE",
+ 2126: "JS_WEAK_REF_TYPE",
+ 2127: "WASM_GLOBAL_OBJECT_TYPE",
+ 2128: "WASM_INSTANCE_OBJECT_TYPE",
+ 2129: "WASM_MEMORY_OBJECT_TYPE",
+ 2130: "WASM_MODULE_OBJECT_TYPE",
+ 2131: "WASM_TABLE_OBJECT_TYPE",
+ 2132: "WASM_TAG_OBJECT_TYPE",
+ 2133: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02119): (172, "MetaMap"),
+ ("read_only_space", 0x02119): (171, "MetaMap"),
("read_only_space", 0x02141): (67, "NullMap"),
- ("read_only_space", 0x02169): (154, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x02191): (159, "WeakFixedArrayMap"),
- ("read_only_space", 0x021d1): (101, "EnumCacheMap"),
- ("read_only_space", 0x02205): (119, "FixedArrayMap"),
+ ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
+ ("read_only_space", 0x021d1): (100, "EnumCacheMap"),
+ ("read_only_space", 0x02205): (118, "FixedArrayMap"),
("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x0229d): (169, "FreeSpaceMap"),
- ("read_only_space", 0x022c5): (168, "OnePointerFillerMap"),
- ("read_only_space", 0x022ed): (168, "TwoPointerFillerMap"),
+ ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
+ ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
+ ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
("read_only_space", 0x02315): (67, "UninitializedMap"),
("read_only_space", 0x0238d): (67, "UndefinedMap"),
("read_only_space", 0x023d1): (66, "HeapNumberMap"),
("read_only_space", 0x02405): (67, "TheHoleMap"),
("read_only_space", 0x02465): (67, "BooleanMap"),
- ("read_only_space", 0x02509): (132, "ByteArrayMap"),
- ("read_only_space", 0x02531): (119, "FixedCOWArrayMap"),
- ("read_only_space", 0x02559): (120, "HashTableMap"),
+ ("read_only_space", 0x02509): (131, "ByteArrayMap"),
+ ("read_only_space", 0x02531): (118, "FixedCOWArrayMap"),
+ ("read_only_space", 0x02559): (119, "HashTableMap"),
("read_only_space", 0x02581): (64, "SymbolMap"),
("read_only_space", 0x025a9): (40, "OneByteStringMap"),
- ("read_only_space", 0x025d1): (178, "ScopeInfoMap"),
- ("read_only_space", 0x025f9): (179, "SharedFunctionInfoMap"),
- ("read_only_space", 0x02621): (162, "CodeMap"),
- ("read_only_space", 0x02649): (161, "CellMap"),
- ("read_only_space", 0x02671): (177, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x025d1): (177, "ScopeInfoMap"),
+ ("read_only_space", 0x025f9): (178, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x02621): (161, "CodeMap"),
+ ("read_only_space", 0x02649): (160, "CellMap"),
+ ("read_only_space", 0x02671): (176, "GlobalPropertyCellMap"),
("read_only_space", 0x02699): (70, "ForeignMap"),
- ("read_only_space", 0x026c1): (160, "TransitionArrayMap"),
+ ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x02711): (167, "FeedbackVectorMap"),
+ ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x027a9): (67, "ExceptionMap"),
("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
- ("read_only_space", 0x0292d): (131, "ScriptContextTableMap"),
- ("read_only_space", 0x02955): (129, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x0297d): (166, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a5): (119, "ArrayListMap"),
+ ("read_only_space", 0x0292d): (130, "ScriptContextTableMap"),
+ ("read_only_space", 0x02955): (128, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x0297d): (165, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029a5): (118, "ArrayListMap"),
("read_only_space", 0x029cd): (65, "BigIntMap"),
- ("read_only_space", 0x029f5): (130, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a1d): (133, "BytecodeArrayMap"),
- ("read_only_space", 0x02a45): (163, "CodeDataContainerMap"),
- ("read_only_space", 0x02a6d): (164, "CoverageInfoMap"),
- ("read_only_space", 0x02a95): (134, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02abd): (122, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae5): (102, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b0d): (173, "MegaDomHandlerMap"),
- ("read_only_space", 0x02b35): (119, "ModuleInfoMap"),
- ("read_only_space", 0x02b5d): (123, "NameDictionaryMap"),
- ("read_only_space", 0x02b85): (102, "NoClosuresCellMap"),
- ("read_only_space", 0x02bad): (124, "NumberDictionaryMap"),
- ("read_only_space", 0x02bd5): (102, "OneClosureCellMap"),
- ("read_only_space", 0x02bfd): (125, "OrderedHashMapMap"),
- ("read_only_space", 0x02c25): (126, "OrderedHashSetMap"),
- ("read_only_space", 0x02c4d): (127, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c75): (175, "PreparseDataMap"),
- ("read_only_space", 0x02c9d): (176, "PropertyArrayMap"),
- ("read_only_space", 0x02cc5): (98, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02ced): (98, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d15): (98, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d3d): (128, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d65): (150, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d8d): (151, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02db5): (152, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02ddd): (155, "SourceTextModuleMap"),
- ("read_only_space", 0x02e05): (183, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02e2d): (156, "SyntheticModuleMap"),
+ ("read_only_space", 0x029f5): (129, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a1d): (132, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a45): (162, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a6d): (163, "CoverageInfoMap"),
+ ("read_only_space", 0x02a95): (133, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02abd): (121, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02ae5): (101, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b0d): (172, "MegaDomHandlerMap"),
+ ("read_only_space", 0x02b35): (118, "ModuleInfoMap"),
+ ("read_only_space", 0x02b5d): (122, "NameDictionaryMap"),
+ ("read_only_space", 0x02b85): (101, "NoClosuresCellMap"),
+ ("read_only_space", 0x02bad): (123, "NumberDictionaryMap"),
+ ("read_only_space", 0x02bd5): (101, "OneClosureCellMap"),
+ ("read_only_space", 0x02bfd): (124, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c25): (125, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c4d): (126, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02c75): (174, "PreparseDataMap"),
+ ("read_only_space", 0x02c9d): (175, "PropertyArrayMap"),
+ ("read_only_space", 0x02cc5): (97, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02ced): (97, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d15): (97, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d3d): (127, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02d65): (149, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02d8d): (150, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02db5): (151, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02ddd): (154, "SourceTextModuleMap"),
+ ("read_only_space", 0x02e05): (182, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02e2d): (155, "SyntheticModuleMap"),
("read_only_space", 0x02e55): (72, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02e7d): (73, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02ea5): (74, "WasmJSFunctionDataMap"),
("read_only_space", 0x02ecd): (75, "WasmTypeInfoMap"),
- ("read_only_space", 0x02ef5): (184, "WeakArrayListMap"),
- ("read_only_space", 0x02f1d): (121, "EphemeronHashTableMap"),
- ("read_only_space", 0x02f45): (165, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02f6d): (185, "WeakCellMap"),
+ ("read_only_space", 0x02ef5): (183, "WeakArrayListMap"),
+ ("read_only_space", 0x02f1d): (120, "EphemeronHashTableMap"),
+ ("read_only_space", 0x02f45): (164, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02f6d): (184, "WeakCellMap"),
("read_only_space", 0x02f95): (32, "StringMap"),
("read_only_space", 0x02fbd): (41, "ConsOneByteStringMap"),
("read_only_space", 0x02fe5): (33, "ConsStringMap"),
@@ -329,7 +329,7 @@ KNOWN_MAPS = {
("read_only_space", 0x031ed): (67, "SelfReferenceMarkerMap"),
("read_only_space", 0x03215): (67, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x03259): (91, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x03359): (104, "InterceptorInfoMap"),
+ ("read_only_space", 0x03359): (103, "InterceptorInfoMap"),
("read_only_space", 0x05699): (76, "PromiseFulfillReactionJobTaskMap"),
("read_only_space", 0x056c1): (77, "PromiseRejectReactionJobTaskMap"),
("read_only_space", 0x056e9): (78, "CallableTaskMap"),
@@ -344,54 +344,53 @@ KNOWN_MAPS = {
("read_only_space", 0x05851): (89, "AllocationMementoMap"),
("read_only_space", 0x05879): (92, "AsmWasmDataMap"),
("read_only_space", 0x058a1): (93, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x058c9): (94, "BaselineDataMap"),
- ("read_only_space", 0x058f1): (95, "BreakPointMap"),
- ("read_only_space", 0x05919): (96, "BreakPointInfoMap"),
- ("read_only_space", 0x05941): (97, "CachedTemplateObjectMap"),
- ("read_only_space", 0x05969): (99, "ClassPositionsMap"),
- ("read_only_space", 0x05991): (100, "DebugInfoMap"),
- ("read_only_space", 0x059b9): (103, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x059e1): (105, "InterpreterDataMap"),
- ("read_only_space", 0x05a09): (106, "ModuleRequestMap"),
- ("read_only_space", 0x05a31): (107, "PromiseCapabilityMap"),
- ("read_only_space", 0x05a59): (108, "PromiseReactionMap"),
- ("read_only_space", 0x05a81): (109, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05aa9): (110, "PrototypeInfoMap"),
- ("read_only_space", 0x05ad1): (111, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x05af9): (112, "ScriptMap"),
- ("read_only_space", 0x05b21): (113, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x05b49): (114, "StackFrameInfoMap"),
- ("read_only_space", 0x05b71): (115, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05b99): (116, "Tuple2Map"),
- ("read_only_space", 0x05bc1): (117, "WasmExceptionTagMap"),
- ("read_only_space", 0x05be9): (118, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x05c11): (136, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x05c39): (153, "DescriptorArrayMap"),
- ("read_only_space", 0x05c61): (158, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x05c89): (157, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05cb1): (174, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x05cd9): (170, "InternalClassMap"),
- ("read_only_space", 0x05d01): (181, "SmiPairMap"),
- ("read_only_space", 0x05d29): (180, "SmiBoxMap"),
- ("read_only_space", 0x05d51): (147, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05d79): (148, "ExportedSubClassMap"),
- ("read_only_space", 0x05da1): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05dc9): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05df1): (135, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05e19): (171, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05e41): (149, "ExportedSubClass2Map"),
- ("read_only_space", 0x05e69): (182, "SortStateMap"),
- ("read_only_space", 0x05e91): (90, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05eb9): (90, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05ee1): (81, "LoadHandler1Map"),
- ("read_only_space", 0x05f09): (81, "LoadHandler2Map"),
- ("read_only_space", 0x05f31): (81, "LoadHandler3Map"),
- ("read_only_space", 0x05f59): (82, "StoreHandler0Map"),
- ("read_only_space", 0x05f81): (82, "StoreHandler1Map"),
- ("read_only_space", 0x05fa9): (82, "StoreHandler2Map"),
- ("read_only_space", 0x05fd1): (82, "StoreHandler3Map"),
+ ("read_only_space", 0x058c9): (94, "BreakPointMap"),
+ ("read_only_space", 0x058f1): (95, "BreakPointInfoMap"),
+ ("read_only_space", 0x05919): (96, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x05941): (98, "ClassPositionsMap"),
+ ("read_only_space", 0x05969): (99, "DebugInfoMap"),
+ ("read_only_space", 0x05991): (102, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x059b9): (104, "InterpreterDataMap"),
+ ("read_only_space", 0x059e1): (105, "ModuleRequestMap"),
+ ("read_only_space", 0x05a09): (106, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05a31): (107, "PromiseReactionMap"),
+ ("read_only_space", 0x05a59): (108, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05a81): (109, "PrototypeInfoMap"),
+ ("read_only_space", 0x05aa9): (110, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05ad1): (111, "ScriptMap"),
+ ("read_only_space", 0x05af9): (112, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05b21): (113, "StackFrameInfoMap"),
+ ("read_only_space", 0x05b49): (114, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05b71): (115, "Tuple2Map"),
+ ("read_only_space", 0x05b99): (116, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05bc1): (117, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05be9): (135, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x05c11): (152, "DescriptorArrayMap"),
+ ("read_only_space", 0x05c39): (157, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05c61): (156, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05c89): (173, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05cb1): (169, "InternalClassMap"),
+ ("read_only_space", 0x05cd9): (180, "SmiPairMap"),
+ ("read_only_space", 0x05d01): (179, "SmiBoxMap"),
+ ("read_only_space", 0x05d29): (146, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05d51): (147, "ExportedSubClassMap"),
+ ("read_only_space", 0x05d79): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05da1): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05dc9): (134, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05df1): (170, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05e19): (148, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05e41): (181, "SortStateMap"),
+ ("read_only_space", 0x05e69): (90, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05e91): (90, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05eb9): (81, "LoadHandler1Map"),
+ ("read_only_space", 0x05ee1): (81, "LoadHandler2Map"),
+ ("read_only_space", 0x05f09): (81, "LoadHandler3Map"),
+ ("read_only_space", 0x05f31): (82, "StoreHandler0Map"),
+ ("read_only_space", 0x05f59): (82, "StoreHandler1Map"),
+ ("read_only_space", 0x05f81): (82, "StoreHandler2Map"),
+ ("read_only_space", 0x05fa9): (82, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
- ("map_space", 0x02141): (2113, "JSMessageObjectMap"),
+ ("map_space", 0x02141): (2114, "JSMessageObjectMap"),
}
# List of known V8 objects.
@@ -476,27 +475,27 @@ KNOWN_OBJECTS = {
("old_space", 0x029b5): "StringSplitCache",
("old_space", 0x02dbd): "RegExpMultipleCache",
("old_space", 0x031c5): "BuiltinsConstantsTable",
- ("old_space", 0x035e5): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x03609): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0362d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x03651): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x03675): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03699): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x036bd): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x036e1): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x03705): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x03729): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0374d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x03771): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x03795): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x037b9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x037dd): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x03801): "PromiseCatchFinallySharedFun",
- ("old_space", 0x03825): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x03849): "PromiseThenFinallySharedFun",
- ("old_space", 0x0386d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x03891): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x038b5): "ProxyRevokeSharedFun",
+ ("old_space", 0x035ed): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x03611): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x03635): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x03659): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x0367d): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x036a1): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x036c5): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x036e9): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x0370d): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x03731): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x03755): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x03779): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x0379d): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x037c1): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x037e5): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x03809): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x0382d): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x03851): "PromiseThenFinallySharedFun",
+ ("old_space", 0x03875): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x03899): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x038bd): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
diff --git a/chromium/v8/tools/wasm/update-wasm-spec-tests.sh b/chromium/v8/tools/wasm/update-wasm-spec-tests.sh
index df5348eb787..1ab8853a1ad 100755
--- a/chromium/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/chromium/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -44,6 +44,7 @@ log_and_run mkdir ${TMP_DIR}
log_and_run rm -rf ${JS_API_TEST_DIR}/tests
log_and_run mkdir ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests/wpt
log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals
###############################################################################
@@ -68,6 +69,25 @@ log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
###############################################################################
+# Generate the wpt tests.
+###############################################################################
+
+echo Process wpt
+log_and_run cd ${TMP_DIR}
+log_and_run git clone https://github.com/web-platform-tests/wpt
+log_and_run cp -r wpt/wasm/jsapi/* ${JS_API_TEST_DIR}/tests/wpt
+
+log_and_run cd ${JS_API_TEST_DIR}/tests
+for spec_test_name in $(find ./ -name '*.any.js' -not -wholename '*/wpt/*'); do
+ wpt_test_name="wpt/${spec_test_name}"
+ if [ -f "$wpt_test_name" ] && cmp -s $spec_test_name $wpt_test_name ; then
+ log_and_run rm $wpt_test_name
+ elif [ -f "$wpt_test_name" ]; then
+ echo "keep" $wpt_test_name
+ fi
+done
+
+###############################################################################
# Generate the proposal tests.
###############################################################################
diff --git a/chromium/v8/tools/whitespace.txt b/chromium/v8/tools/whitespace.txt
index 2ac2a5ccfa7..2288a124f60 100644
--- a/chromium/v8/tools/whitespace.txt
+++ b/chromium/v8/tools/whitespace.txt
@@ -6,13 +6,14 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly.......
-The bartender starts to shake the bottles............................
-I can't add trailing whitespaces, so I'm adding this line.............
+The autoroller bought a round of Himbeerbrause. Suddenly........
+The bartender starts to shake the bottles...........................
+I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.
Because whitespaces are not that funny......
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
Today's answer to life the universe and everything is 6728!!
-.
+..
+No good text, but numbers ... 6!